/*	$NetBSD: if_bwfm_pci.c,v 1.6 2018/12/09 11:14:02 jdolecek Exp $	*/
/*	$OpenBSD: if_bwfm_pci.c,v 1.18 2018/02/08 05:00:38 patrick Exp $	*/
/*
 * Copyright (c) 2010-2016 Broadcom Corporation
 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/workqueue.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/in.h>

#include <net80211/ieee80211_var.h>

#include <dev/firmload.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/bwfmvar.h>
#include <dev/ic/bwfmreg.h>
#include <dev/pci/if_bwfm_pci.h>

#define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
#define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
#define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN

#define BWFM_NUM_TX_MSGRINGS			2
#define BWFM_NUM_RX_MSGRINGS			3

#define BWFM_NUM_TX_PKTIDS			2048
#define BWFM_NUM_RX_PKTIDS			1024

#define BWFM_NUM_TX_DESCS			1
#define BWFM_NUM_RX_DESCS			1

#ifdef BWFM_DEBUG
#define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
static int bwfm_debug = 2;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#define DEVNAME(sc)	device_xname((sc)->sc_sc.sc_dev)
#define letoh16		htole16
#define letoh32		htole32
#define nitems(x)	__arraycount(x)

enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};

struct bwfm_pci_msgring {
	uint32_t		 w_idx_addr;
	uint32_t		 r_idx_addr;
	uint32_t		 w_ptr;
	uint32_t		 r_ptr;
	int			 nitem;
	int			 itemsz;
	enum ring_status	 status;
	struct bwfm_pci_dmamem	*ring;
	struct mbuf		*m;

	int			 fifo;
	uint8_t			 mac[ETHER_ADDR_LEN];
};

struct bwfm_pci_buf {
	bus_dmamap_t	 bb_map;
	struct mbuf	*bb_m;
};

struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;
	uint32_t		 npkt;
	int			 last;
};

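/*
 * Minimal local version of OpenBSD's if_rxring accounting (see the
 * static if_rxr_* helpers below); it only tracks how many RX slots
 * are in use out of a fixed total.
 */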
struct if_rxring {
	u_int	rxr_total;
	u_int	rxr_inuse;
};

struct bwfm_cmd_flowring_create {
	struct work		 wq_cookie;
	struct bwfm_pci_softc	*sc;
	struct mbuf		*m;
	int			 flowid;
	int			 prio;
};

struct bwfm_pci_softc {
	struct bwfm_softc	 sc_sc;
	pci_chipset_tag_t	 sc_pc;
	pcitag_t		 sc_tag;
	pcireg_t		 sc_id;
	void			*sc_ih;
	pci_intr_handle_t	*sc_pihp;

	bus_space_tag_t		 sc_reg_iot;
	bus_space_handle_t	 sc_reg_ioh;
	bus_size_t		 sc_reg_ios;

	bus_space_tag_t		 sc_tcm_iot;
	bus_space_handle_t	 sc_tcm_ioh;
	bus_size_t		 sc_tcm_ios;

	bus_dma_tag_t		 sc_dmat;

	uint32_t		 sc_shared_address;
	uint32_t		 sc_shared_flags;
	uint8_t			 sc_shared_version;

	uint8_t			 sc_dma_idx_sz;
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
	size_t			 sc_dma_idx_bufsz;

	uint16_t		 sc_max_rxbufpost;
	uint32_t		 sc_rx_dataoffset;
	uint32_t		 sc_htod_mb_data_addr;
	uint32_t		 sc_dtoh_mb_data_addr;
	uint32_t		 sc_ring_info_addr;

	uint32_t		 sc_console_base_addr;
	uint32_t		 sc_console_buf_addr;
	uint32_t		 sc_console_buf_size;
	uint32_t		 sc_console_readidx;

	struct pool		 sc_flowring_pool;
	struct workqueue	*flowring_wq;

	uint16_t		 sc_max_flowrings;
	uint16_t		 sc_max_submissionrings;
	uint16_t		 sc_max_completionrings;

	struct bwfm_pci_msgring	 sc_ctrl_submit;
	struct bwfm_pci_msgring	 sc_rxpost_submit;
	struct bwfm_pci_msgring	 sc_ctrl_complete;
	struct bwfm_pci_msgring	 sc_tx_complete;
	struct bwfm_pci_msgring	 sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;

	struct bwfm_pci_dmamem	*sc_scratch_buf;
	struct bwfm_pci_dmamem	*sc_ringupd_buf;

	struct bwfm_pci_dmamem	*sc_ioctl_buf;
	int			 sc_ioctl_reqid;
	uint32_t		 sc_ioctl_resp_pktid;
	uint32_t		 sc_ioctl_resp_ret_len;
	uint32_t		 sc_ioctl_resp_status;
	int			 sc_ioctl_poll;

	struct if_rxring	 sc_ioctl_ring;
	struct if_rxring	 sc_event_ring;
	struct if_rxring	 sc_rxbuf_ring;

	struct bwfm_pci_pkts	 sc_rx_pkts;
	struct bwfm_pci_pkts	 sc_tx_pkts;
	int			 sc_tx_pkts_full;
};

struct bwfm_pci_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	char *			bdm_kva;
};

#define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BWFM_PCI_DMA_DVA(_bdm)	(uint64_t)((_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BWFM_PCI_DMA_KVA(_bdm)	((_bdm)->bdm_kva)

static u_int	 if_rxr_get(struct if_rxring *rxr, unsigned int max);
static void	 if_rxr_put(struct if_rxring *rxr, unsigned int n);
static void	 if_rxr_init(struct if_rxring *rxr, unsigned int lwm, unsigned int hwm);

int		 bwfm_pci_match(device_t parent, cfdata_t match, void *aux);
void		 bwfm_pci_attachhook(device_t);
void		 bwfm_pci_attach(device_t, device_t, void *);
int		 bwfm_pci_detach(device_t, int);

int		 bwfm_pci_intr(void *);
void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
		    size_t);
void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int);

struct bwfm_pci_dmamem *
		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
		    bus_size_t);
void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *);
int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, struct mbuf **,
		    uint32_t *, paddr_t *);
struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, uint32_t);
void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
		    struct if_rxring *, uint32_t);
void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t);

void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int, int *);
void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int *);
void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);
void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);

void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *);

uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
		    uint32_t);
int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
void		 bwfm_pci_buscore_activate(struct bwfm_softc *, const uint32_t);

int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
		     struct mbuf *);
void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
		     struct mbuf *);
void		 bwfm_pci_flowring_create_cb(struct work *, void *);
void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);

void		 bwfm_pci_stop(struct bwfm_softc *);
int		 bwfm_pci_txcheck(struct bwfm_softc *);
int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf **);

#ifdef BWFM_DEBUG
void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
#endif

int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t *);
int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t);

struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};

struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_init = NULL,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
	.bs_rxctl = NULL,
};

struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
};


CFATTACH_DECL_NEW(bwfm_pci, sizeof(struct bwfm_pci_softc),
    bwfm_pci_match, bwfm_pci_attach, bwfm_pci_detach, NULL);

static const struct bwfm_pci_matchid {
	pci_vendor_id_t		bwfm_vendor;
	pci_product_id_t	bwfm_product;
} bwfm_pci_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
};

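/*
 * Compatibility shim for OpenBSD's MCLGETI(): allocate a packet header
 * mbuf with an external buffer large enough for the requested size.
 */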
static struct mbuf *
MCLGETI(struct bwfm_pci_softc *sc __unused, int how,
    struct ifnet *ifp __unused, u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

int
bwfm_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_BROADCOM)
		return 0;

	for (size_t i = 0; i < __arraycount(bwfm_pci_devices); i++)
		if (PCI_PRODUCT(pa->pa_id) == bwfm_pci_devices[i].bwfm_product)
			return 1;

	return 0;
}

void
bwfm_pci_attach(device_t parent, device_t self, void *aux)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_sc.sc_dev = self;

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
	    NULL, &sc->sc_reg_ios)) {
		printf(": can't map bar0\n");
		return;
	}

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
	    NULL, &sc->sc_tcm_ios)) {
		printf(": can't map bar1\n");
		goto bar0;
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_id = pa->pa_id;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	/* Map and establish the interrupt. */
	if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0) != 0) {
		printf(": couldn't map interrupt\n");
		goto bar1;
	}
	intrstr = pci_intr_string(pa->pa_pc, sc->sc_pihp[0], intrbuf, sizeof(intrbuf));

	sc->sc_ih = pci_intr_establish_xname(pa->pa_pc, sc->sc_pihp[0], IPL_NET,
	    bwfm_pci_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto bar1;
	}
	printf(": %s\n", intrstr);

	config_mountroot(self, bwfm_pci_attachhook);
	return;

bar1:
	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
bar0:
	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
}

void
bwfm_pci_attachhook(device_t self)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_pci_ringinfo ringinfo;
	const char *name = NULL;
	firmware_handle_t fwh;
	u_char *ucode; size_t size;
	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint32_t idx_offset, reg;
	int i;
	int error;

	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
		printf("%s: cannot attach chip\n", DEVNAME(sc));
		return;
	}

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);

	switch (bwfm->sc_chip.ch_chip)
	{
	case BRCM_CC_4350_CHIP_ID:
		if (bwfm->sc_chip.ch_chiprev > 7)
			name = "brcmfmac4350-pcie.bin";
		else
			name = "brcmfmac4350c2-pcie.bin";
		break;
	case BRCM_CC_43602_CHIP_ID:
		name = "brcmfmac43602-pcie.bin";
		break;
	default:
		printf("%s: unknown firmware for chip %s\n",
		    DEVNAME(sc), bwfm->sc_chip.ch_name);
		return;
	}

	if (firmware_open("if_bwfm", name, &fwh) != 0) {
		printf("%s: failed firmware_open of file %s\n",
		    DEVNAME(sc), name);
		return;
	}
	size = firmware_get_size(fwh);
	ucode = firmware_malloc(size);
	if (ucode == NULL) {
		printf("%s: failed to allocate firmware memory\n",
		    DEVNAME(sc));
		firmware_close(fwh);
		return;
	}
	error = firmware_read(fwh, 0, ucode, size);
	firmware_close(fwh);
	if (error != 0) {
		printf("%s: failed to read firmware (error %d)\n",
		    DEVNAME(sc), error);
		firmware_free(ucode, size);
		return;
	}

	/* Retrieve RAM size from firmware. */
	if (size >= BWFM_RAMSIZE + 8) {
		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
	}

	if (bwfm_pci_load_microcode(sc, ucode, size) != 0) {
		printf("%s: could not load microcode\n",
		    DEVNAME(sc));
		firmware_free(ucode, size);
		return;
	}

	firmware_free(ucode, size);

	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_INFO);
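	/* Assigning to a uint8_t keeps only the version byte of the flags. */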
	sc->sc_shared_version = sc->sc_shared_flags;
	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
		printf("%s: PCIe version %d unsupported\n",
		    DEVNAME(sc), sc->sc_shared_version);
		return;
	}

	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
			sc->sc_dma_idx_sz = sizeof(uint16_t);
		else
			sc->sc_dma_idx_sz = sizeof(uint32_t);
	}

	/* Maximum RX data buffers in the ring. */
	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
	if (sc->sc_max_rxbufpost == 0)
		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;

	/* Alternative offset of data in a packet */
	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);

	/* For Power Management */
	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);

	/* Ring information */
	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);

	/* Firmware's "dmesg" */
	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);

	/* Read ring information. */
	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));

	if (sc->sc_shared_version >= 6) {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
	} else {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_flowrings = sc->sc_max_submissionrings -
		    BWFM_NUM_TX_MSGRINGS;
		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
	}

	if (sc->sc_dma_idx_sz == 0) {
		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(uint32_t);
	} else {
		uint64_t address;

		/* Each TX/RX Ring has a Read and Write Ptr */
		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
		    sc->sc_dma_idx_bufsz, 8);
		if (sc->sc_dma_idx_buf == NULL) {
			/* XXX: Fallback to TCM? */
			printf("%s: cannot allocate idx buf\n",
			    DEVNAME(sc));
			return;
		}

		idx_offset = sc->sc_dma_idx_sz;
		h2d_w_idx_ptr = 0;
		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
		ringinfo.h2d_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr_high =
		    htole32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
		    sc->sc_max_completionrings * idx_offset;
		address += sc->sc_max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr_high =
		    htole32(address >> 32);

		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
	}

	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* TX rxpost ring: Send clean data mbufs for RX */
	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* RX completion rings: recv our filled buffers back */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024, 16,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512, 32,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;

	/* Dynamic TX rings for actual data */
	sc->sc_flowrings = kmem_zalloc(sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring), KM_SLEEP);
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
	}

	pool_init(&sc->sc_flowring_pool, sizeof(struct bwfm_cmd_flowring_create),
	    0, 0, 0, "bwfmpl", NULL, IPL_NET);

	/* Scratch and ring update buffers for firmware */
	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);

	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);

	if ((sc->sc_ioctl_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_H2D_IOCTL_BUF_LEN, 8)) == NULL)
		goto cleanup;

	if (workqueue_create(&sc->flowring_wq, "bwfmflow",
	    bwfm_pci_flowring_create_cb, sc, PRI_SOFTNET, IPL_NET, 0))
		goto cleanup;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bwfm_pci_intr_enable(sc);

	/* Maps RX mbufs to a packet id and back. */
	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
	sc->sc_rx_pkts.pkts = kmem_zalloc(BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx_pkts.pkts[i].bb_map);

	/* Maps TX mbufs to a packet id and back. */
	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
	sc->sc_tx_pkts.pkts = kmem_zalloc(BWFM_NUM_TX_PKTIDS
	    * sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx_pkts.pkts[i].bb_map);

	/*
	 * For whatever reason, could also be a bug somewhere in this
	 * driver, the firmware needs a bunch of RX buffers otherwise
	 * it won't send any RX complete messages.  64 buffers don't
	 * suffice, but 128 buffers are enough.
	 */
	if_rxr_init(&sc->sc_rxbuf_ring, 128, sc->sc_max_rxbufpost);
	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
	if_rxr_init(&sc->sc_event_ring, 8, 8);
	bwfm_pci_fill_rx_rings(sc);


#ifdef BWFM_DEBUG
	sc->sc_console_readidx = 0;
	bwfm_pci_debug_console(sc);
#endif

	sc->sc_ioctl_poll = 1;
	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
	bwfm_attach(&sc->sc_sc);
	sc->sc_ioctl_poll = 0;
	return;

cleanup:
	if (sc->flowring_wq != NULL)
		workqueue_destroy(sc->flowring_wq);
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	}
	if (sc->sc_ioctl_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	if (sc->sc_ringupd_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	if (sc->sc_scratch_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	if (sc->sc_rx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	if (sc->sc_tx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	if (sc->sc_ctrl_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	if (sc->sc_rxpost_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	if (sc->sc_ctrl_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
}

int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t shared;
	int i;

	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	/* TODO: restore NVRAM */

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(const uint32_t *)ucode);

	for (i = 0; i < 40; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared)
			break;
	}
	if (!shared) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}

int
bwfm_pci_detach(device_t self, int flags)
{
	struct bwfm_pci_softc *sc = device_private(self);

	bwfm_detach(&sc->sc_sc, flags);

	/* FIXME: free RX buffers */
	/* FIXME: free TX buffers */
	/* FIXME: free more memory */

	kmem_free(sc->sc_flowrings, sc->sc_max_flowrings
	    * sizeof(struct bwfm_pci_msgring));
	pool_destroy(&sc->sc_flowring_pool);

	workqueue_destroy(sc->flowring_wq);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 0;
}

/* DMA code */
struct bwfm_pci_dmamem *
bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
{
	struct bwfm_pci_dmamem *bdm;
	int nsegs;

	bdm = kmem_zalloc(sizeof(*bdm), KM_SLEEP);
	bdm->bdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
		goto bdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
	    (void **) &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(bdm->bdm_kva, size);

	return (bdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
bdmfree:
	kmem_free(bdm, sizeof(*bdm));

	return (NULL);
}

void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	kmem_free(bdm, sizeof(*bdm));
}

/*
 * We need a simple mapping from a packet ID to mbufs, because when
 * a transfer completed, we only know the ID so we have to look up
 * the memory for the ID.  This simply looks for an empty slot.
 */
int
bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL)
			return 0;
		idx++;
	}
	return ENOBUFS;
}

int
bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    struct mbuf **mp, uint32_t *pktid, paddr_t *paddr)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat,
			    pkts->pkts[idx].bb_map, *mp, BUS_DMA_NOWAIT) != 0) {
				/*
				 * Didn't fit.  Maybe it has too many
				 * segments.  If it has only one
				 * segment, fail; otherwise try to
				 * compact it into a single mbuf
				 * segment.
				 */
				if ((*mp)->m_next == NULL)
					return ENOBUFS;
				struct mbuf *m0 = MCLGETI(NULL, M_DONTWAIT,
				    NULL, MSGBUF_MAX_PKT_SIZE);
				if (m0 == NULL)
					return ENOBUFS;
				m_copydata(*mp, 0, (*mp)->m_pkthdr.len,
				    mtod(m0, void *));
				m0->m_pkthdr.len = m0->m_len =
				    (*mp)->m_pkthdr.len;
				m_freem(*mp);
				*mp = m0;
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    pkts->pkts[idx].bb_map, *mp, BUS_DMA_NOWAIT) != 0)
					return EFBIG;
			}
			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
			    0, pkts->pkts[idx].bb_map->dm_mapsize,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			pkts->last = idx;
			pkts->pkts[idx].bb_m = *mp;
			*pktid = idx;
			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
			return 0;
		}
		idx++;
	}
	return ENOBUFS;
}

struct mbuf *
bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    uint32_t pktid)
{
	struct mbuf *m;

	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
	    pkts->pkts[pktid].bb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
	m = pkts->pkts[pktid].bb_m;
	pkts->pkts[pktid].bb_m = NULL;
	return m;
}

void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}

void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;
	uint64_t devaddr;

	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		devaddr = paddr;
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->host_buf_addr.high_addr = htole32(devaddr >> 32);
		req->host_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	if_rxr_put(rxring, slots);
	splx(s);
}

void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;
	uint64_t devaddr;

	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		devaddr = paddr;
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32(devaddr >> 32);
		req->data_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}

int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}

int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}

/* Ring helpers */
void
bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_H2D_MAILBOX, 1);
}

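/*
 * Refresh our cached read pointer, either from TCM or from the DMA
 * index buffer the firmware updates on the host side.
 */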
void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}

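/*
 * Take up to "max" free slots out of the ring's budget; any slots
 * that end up unused must be returned with if_rxr_put().
 */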
static u_int
if_rxr_get(struct if_rxring *rxr, unsigned int max)
{
	u_int taken = MIN(max, (rxr->rxr_total - rxr->rxr_inuse));

	KASSERTMSG(rxr->rxr_inuse + taken <= rxr->rxr_total,
			"rxr->rxr_inuse: %d\n"
			"taken: %d\n"
			"rxr->rxr_total: %d\n",
			rxr->rxr_inuse, taken, rxr->rxr_total);
	rxr->rxr_inuse += taken;

	return taken;
}

static void
if_rxr_put(struct if_rxring *rxr, unsigned int n)
{
	KASSERTMSG(rxr->rxr_inuse >= n,
			"rxr->rxr_inuse: %d\n"
			"n: %d\n"
			"rxr->rxr_total: %d\n",
			rxr->rxr_inuse, n, rxr->rxr_total);

	rxr->rxr_inuse -= n;
}

static void
if_rxr_init(struct if_rxring *rxr, unsigned int lwm __unused, unsigned int hwm)
{
	(void) lwm;

	rxr->rxr_total = hwm;
	rxr->rxr_inuse = 0;
}

void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}

void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

/*
 * Retrieve a free descriptor to put new stuff in, but don't commit
 * to it yet so we can rollback later if any error occurs.
 */
void *
bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	ring->w_ptr += 1;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

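/*
 * Like bwfm_pci_ring_write_reserve(), but reserve up to "count"
 * contiguous descriptors; the number actually reserved is returned
 * in *avail and may be smaller at the end of the ring.
 */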
void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	*avail = uimin(count, available - 1);
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

/*
 * Read number of descriptors available (submitted by the firmware)
 * and retrieve pointer to first descriptor.
 */
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	bwfm_pci_ring_update_wptr(sc, ring);

	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

	if (*avail == 0)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}

/*
 * Let firmware know we read N descriptors.
 */
void
bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	ring->r_ptr += nitem;
	if (ring->r_ptr == ring->nitem)
		ring->r_ptr = 0;
	bwfm_pci_ring_write_rptr(sc, ring);
}

/*
 * Let firmware know that we submitted some descriptors.
 */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}

/*
 * Rollback N descriptors in case we don't actually want
 * to commit to it.
 */
void
bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	if (ring->w_ptr == 0)
		ring->w_ptr = ring->nitem - nitem;
	else
		ring->w_ptr -= nitem;
}

/*
 * Foreach written descriptor on the ring, pass the descriptor to
 * a message handler and let the firmware know we handled it.
 */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring)
{
	char *buf;
	int avail, processed;

again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

	processed = 0;
	while (avail) {
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset);
		buf += ring->itemsz;
		processed++;
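		/* Commit in batches so the firmware can reuse slots early. */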
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	if (ring->r_ptr == 0)
		goto again;
}

void
bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf)
{
	struct ifnet *ifp = sc->sc_sc.sc_ic.ic_ifp;
	struct msgbuf_ioctl_resp_hdr *resp;
	struct msgbuf_tx_status *tx;
	struct msgbuf_rx_complete *rx;
	struct msgbuf_rx_event *event;
	struct msgbuf_common_hdr *msg;
	struct msgbuf_flowring_create_resp *fcr;
	struct msgbuf_flowring_delete_resp *fdr;
	struct bwfm_pci_msgring *ring;
	struct mbuf *m;
	int flowid;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype)
	{
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		fcr = (struct msgbuf_flowring_create_resp *)buf;
		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_OPENING)
			break;
		if (fcr->compl_hdr.status) {
			printf("%s: failed to open flowring %d\n",
			    DEVNAME(sc), flowid);
			ring->status = RING_CLOSED;
			if (ring->m) {
				m_freem(ring->m);
				ring->m = NULL;
			}
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
			break;
		}
		ring->status = RING_OPEN;
		if (ring->m != NULL) {
			m = ring->m;
			ring->m = NULL;
			if (bwfm_pci_txdata(&sc->sc_sc, &m))
				m_freem(m);
		}
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_start(ifp);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		fdr = (struct msgbuf_flowring_delete_resp *)buf;
		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_CLOSING)
			break;
		if (fdr->compl_hdr.status) {
			printf("%s: failed to delete flowring %d\n",
			    DEVNAME(sc), flowid);
			break;
		}
		bwfm_pci_dmamem_free(sc, ring->ring);
		ring->status = RING_CLOSED;
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
		sc->sc_ioctl_resp_pktid = letoh32(resp->msg.request_id);
		sc->sc_ioctl_resp_ret_len = letoh16(resp->resp_len);
		sc->sc_ioctl_resp_status = letoh16(resp->compl_hdr.status);
		if_rxr_put(&sc->sc_ioctl_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		wakeup(&sc->sc_ioctl_buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		event = (struct msgbuf_rx_event *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(event->msg.request_id));
		if (m == NULL)
			break;
		m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_event_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		tx = (struct msgbuf_tx_status *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
		    letoh32(tx->msg.request_id));
		if (m == NULL)
			break;
		m_freem(m);
		if (sc->sc_tx_pkts_full) {
			sc->sc_tx_pkts_full = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
		}
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		rx = (struct msgbuf_rx_complete *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(rx->msg.request_id));
		if (m == NULL)
			break;
		if (letoh16(rx->data_offset))
			m_adj(m, letoh16(rx->data_offset));
		else if (sc->sc_rx_dataoffset)
			m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_rxbuf_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	default:
		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
		break;
	}
}

/* Bus core helpers */
void
bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;

	core = bwfm_chip_get_core(bwfm, id);
	if (core == NULL) {
		printf("%s: could not find core to select\n", DEVNAME(sc));
		return;
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW, core->co_base);
	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    BWFM_PCI_BAR0_WINDOW, core->co_base);
}

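/*
 * Indirect buscore register access: slide the BAR0 window onto the
 * page containing the register, then access it at its offset within
 * the window.
 */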
uint32_t
bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
}

void
bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
}

int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}

int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		uint16_t cfg_offset[] = {
		    BWFM_PCI_CFGREG_STATUS_CMD,
		    BWFM_PCI_CFGREG_PM_CSR,
		    BWFM_PCI_CFGREG_MSI_CAP,
		    BWFM_PCI_CFGREG_MSI_ADDR_L,
		    BWFM_PCI_CFGREG_MSI_ADDR_H,
		    BWFM_PCI_CFGREG_MSI_DATA,
		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
		    BWFM_PCI_CFGREG_RBAR_CTRL,
		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}

	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT);
	if (reg != 0xffffffff)
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_PCIE2REG_MAILBOXINT, reg);

	return 0;
}

void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, const uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}

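/*
 * Map a frame's priority to one of the firmware's four TX FIFOs.
 */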
1635 static int bwfm_pci_prio2fifo[8] = {
1636 	1, /* best effort */
1637 	0, /* IPTOS_PREC_IMMEDIATE */
1638 	0, /* IPTOS_PREC_PRIORITY */
1639 	1, /* IPTOS_PREC_FLASH */
1640 	2, /* IPTOS_PREC_FLASHOVERRIDE */
1641 	2, /* IPTOS_PREC_CRITIC_ECP */
1642 	3, /* IPTOS_PREC_INTERNETCONTROL */
1643 	3, /* IPTOS_PREC_NETCONTROL */
1644 };
1645 
1646 int
1647 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
1648 {
1649 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1650 	uint8_t *da = mtod(m, uint8_t *);
1651 	struct ether_header *eh;
1652 	int flowid, prio, fifo;
1653 	int i, found, ac;
1654 
1655 	/* No QoS for EAPOL frames. */
1656 	eh = mtod(m, struct ether_header *);
1657 	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1658 	    M_WME_GETAC(m) : WME_AC_BE;
1659 
1660 	prio = ac;
1661 	fifo = bwfm_pci_prio2fifo[prio];
1662 
1663 	switch (ic->ic_opmode)
1664 	{
1665 	case IEEE80211_M_STA:
1666 		flowid = fifo;
1667 		break;
1668 #ifndef IEEE80211_STA_ONLY
1669 	case IEEE80211_M_HOSTAP:
1670 		if (ETHER_IS_MULTICAST(da))
1671 			da = __UNCONST(etherbroadcastaddr);
1672 		flowid = da[5] * 2 + fifo;
1673 		break;
1674 #endif
1675 	default:
1676 		printf("%s: state not supported\n", DEVNAME(sc));
1677 		return ENOBUFS;
1678 	}
1679 
1680 	found = 0;
1681 	flowid = flowid % sc->sc_max_flowrings;
1682 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1683 		if (ic->ic_opmode == IEEE80211_M_STA &&
1684 		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1685 		    sc->sc_flowrings[flowid].fifo == fifo) {
1686 			found = 1;
1687 			break;
1688 		}
1689 #ifndef IEEE80211_STA_ONLY
1690 		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
1691 		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1692 		    sc->sc_flowrings[flowid].fifo == fifo &&
1693 		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
1694 			found = 1;
1695 			break;
1696 		}
1697 #endif
1698 		flowid = (flowid + 1) % sc->sc_max_flowrings;
1699 	}
1700 
1701 	if (found)
1702 		return flowid;
1703 
1704 	return -1;
1705 }
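
/*
 * Editor's sketch of the scheme above, for illustration only: the
 * preferred slot is derived from the FIFO (and, in AP mode, the last
 * byte of the destination MAC), and collisions fall back to a linear
 * probe over the flowring array:
 *
 *	slot = preferred_slot % sc->sc_max_flowrings;
 *	for (i = 0; i < sc->sc_max_flowrings; i++) {
 *		if (ring_matches(&sc->sc_flowrings[slot], fifo, da))
 *			return slot;
 *		slot = (slot + 1) % sc->sc_max_flowrings;
 *	}
 *	return -1;	// caller schedules creation of a new ring
 *
 * "ring_matches" is a stand-in for the status/fifo/MAC checks done
 * inline above; no such helper exists in this driver.
 */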
1706 
1707 void
1708 bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
1709 {
1710 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create *cmd;
1712 	uint8_t *da = mtod(m, uint8_t *);
1713 	struct ether_header *eh;
1714 	struct bwfm_pci_msgring *ring;
1715 	int flowid, prio, fifo;
1716 	int i, found, ac;
1717 
1718 	cmd = pool_get(&sc->sc_flowring_pool, PR_NOWAIT);
1719 	if (__predict_false(cmd == NULL))
1720 		return;
1721 
1722 	/* No QoS for EAPOL frames. */
1723 	eh = mtod(m, struct ether_header *);
1724 	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1725 	    M_WME_GETAC(m) : WME_AC_BE;
1726 
1727 	prio = ac;
1728 	fifo = bwfm_pci_prio2fifo[prio];
1729 
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = __UNCONST(etherbroadcastaddr);
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: operating mode not supported\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
1745 	}
1746 
1747 	found = 0;
1748 	flowid = flowid % sc->sc_max_flowrings;
1749 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1750 		ring = &sc->sc_flowrings[flowid];
1751 		if (ring->status == RING_CLOSED) {
1752 			ring->status = RING_OPENING;
1753 			found = 1;
1754 			break;
1755 		}
1756 		flowid = (flowid + 1) % sc->sc_max_flowrings;
1757 	}
1758 
	/*
	 * We cannot recover from this condition so far; only a
	 * stop/init cycle can revive the ring state if it ever
	 * happens at all.
	 */
	if (!found) {
		printf("%s: no flowring available\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}
1767 
1768 	cmd->sc = sc;
1769 	cmd->m = m;
1770 	cmd->prio = prio;
1771 	cmd->flowid = flowid;
1772 	workqueue_enqueue(sc->flowring_wq, &cmd->wq_cookie, NULL);
1773 }
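
/*
 * The deferral above assumes a workqueue set up at attach time.  A
 * minimal sketch of that pairing (editor's addition; the attach code
 * is outside this excerpt, the queue name is hypothetical, and error
 * handling is elided):
 *
 *	error = workqueue_create(&sc->flowring_wq, "bwfmfr",
 *	    bwfm_pci_flowring_create_cb, NULL, PRI_NONE, IPL_NET, 0);
 *
 * Each queued bwfm_cmd_flowring_create carries its own softc pointer,
 * so the callback can ignore the workqueue-wide argument.
 */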
1774 
1775 void
bwfm_pci_flowring_create_cb(struct work *wk, void *arg)
{
	struct bwfm_cmd_flowring_create *cmd =
	    container_of(wk, struct bwfm_cmd_flowring_create, wq_cookie);
	struct bwfm_pci_softc *sc = cmd->sc;
1780 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1781 	struct msgbuf_tx_flowring_create_req *req;
1782 	struct bwfm_pci_msgring *ring;
1783 	uint8_t *da, *sa;
1784 
	da = mtod(cmd->m, uint8_t *) + 0 * ETHER_ADDR_LEN;
	sa = mtod(cmd->m, uint8_t *) + 1 * ETHER_ADDR_LEN;
1787 
1788 	ring = &sc->sc_flowrings[cmd->flowid];
	if (ring->status != RING_OPENING) {
		printf("%s: flowring not opening\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}
1804 
1805 	ring->status = RING_OPENING;
1806 	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
1807 	ring->m = cmd->m;
1808 	memcpy(ring->mac, da, ETHER_ADDR_LEN);
1809 #ifndef IEEE80211_STA_ONLY
1810 	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
1811 		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
1812 #endif
1813 
1814 	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
1815 	req->msg.ifidx = 0;
1816 	req->msg.request_id = 0;
1817 	req->tid = bwfm_pci_prio2fifo[cmd->prio];
1818 	req->flow_ring_id = letoh16(cmd->flowid + 2);
1819 	memcpy(req->da, da, ETHER_ADDR_LEN);
1820 	memcpy(req->sa, sa, ETHER_ADDR_LEN);
1821 	req->flow_ring_addr.high_addr =
1822 	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
1823 	req->flow_ring_addr.low_addr =
1824 	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
1825 	req->max_items = letoh16(512);
1826 	req->len_item = letoh16(48);
1827 
1828 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1829 	pool_put(&sc->sc_flowring_pool, cmd);
1830 }
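
/*
 * All control-path requests in this file follow the same
 * reserve/fill/commit discipline on the control submit ring
 * (editor's summary, schematic only):
 *
 *	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
 *	if (req == NULL)
 *		return;		// ring full, caller must retry later
 *	// ...fill in the msgbuf header and request body...
 *	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
 *
 * The commit publishes the new write index and notifies the dongle,
 * so the descriptor must be completely written before the call.
 */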
1831 
1832 void
1833 bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
1834 {
1835 	struct msgbuf_tx_flowring_delete_req *req;
1836 	struct bwfm_pci_msgring *ring;
1837 
1838 	ring = &sc->sc_flowrings[flowid];
1839 	if (ring->status != RING_OPEN) {
1840 		printf("%s: flowring not open\n", DEVNAME(sc));
1841 		return;
1842 	}
1843 
1844 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1845 	if (req == NULL) {
1846 		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1847 		return;
1848 	}
1849 
1850 	ring->status = RING_CLOSING;
1851 
1852 	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1853 	req->msg.ifidx = 0;
1854 	req->msg.request_id = 0;
1855 	req->flow_ring_id = letoh16(flowid + 2);
1856 	req->reason = 0;
1857 
1858 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1859 }
1860 
1861 void
1862 bwfm_pci_stop(struct bwfm_softc *bwfm)
1863 {
1864 	struct bwfm_pci_softc *sc = (void *)bwfm;
1865 	struct bwfm_pci_msgring *ring;
1866 	int i;
1867 
1868 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1869 		ring = &sc->sc_flowrings[i];
1870 		if (ring->status == RING_OPEN)
1871 			bwfm_pci_flowring_delete(sc, i);
1872 	}
1873 }
1874 
1875 int
1876 bwfm_pci_txcheck(struct bwfm_softc *bwfm)
1877 {
1878 	struct bwfm_pci_softc *sc = (void *)bwfm;
1879 	struct bwfm_pci_msgring *ring;
1880 	int i;
1881 
1882 	/* If we are transitioning, we cannot send. */
1883 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1884 		ring = &sc->sc_flowrings[i];
1885 		if (ring->status == RING_OPENING)
1886 			return ENOBUFS;
1887 	}
1888 
1889 	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
1890 		sc->sc_tx_pkts_full = 1;
1891 		return ENOBUFS;
1892 	}
1893 
1894 	return 0;
1895 }
1896 
1897 int
1898 bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf **mp)
1899 {
1900 	struct bwfm_pci_softc *sc = (void *)bwfm;
1901 	struct bwfm_pci_msgring *ring;
1902 	struct msgbuf_tx_msghdr *tx;
1903 	uint32_t pktid;
1904 	paddr_t paddr;
1905 	uint64_t devaddr;
1906 	struct ether_header *eh;
1907 	int flowid, ret, ac;
1908 
1909 	flowid = bwfm_pci_flowring_lookup(sc, *mp);
1910 	if (flowid < 0) {
1911 		/*
1912 		 * We cannot send the packet right now as there is
1913 		 * no flowring yet.  The flowring will be created
1914 		 * asynchronously.  While the ring is transitioning
1915 		 * the TX check will tell the upper layers that we
1916 		 * cannot send packets right now.  When the flowring
1917 		 * is created the queue will be restarted and this
1918 		 * mbuf will be transmitted.
1919 		 */
1920 		bwfm_pci_flowring_create(sc, *mp);
1921 		return 0;
1922 	}
1923 
1924 	ring = &sc->sc_flowrings[flowid];
1925 	if (ring->status == RING_OPENING ||
1926 	    ring->status == RING_CLOSING) {
		printf("%s: tried to use flowring %d while it is "
		    "transitioning (status %d)\n",
		    DEVNAME(sc), flowid, ring->status);
1930 		return ENOBUFS;
1931 	}
1932 
1933 	tx = bwfm_pci_ring_write_reserve(sc, ring);
1934 	if (tx == NULL)
1935 		return ENOBUFS;
1936 
1937 	/* No QoS for EAPOL frames. */
1938 	eh = mtod(*mp, struct ether_header *);
1939 	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1940 	    M_WME_GETAC(*mp) : WME_AC_BE;
1941 
1942 	memset(tx, 0, sizeof(*tx));
1943 	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
1944 	tx->msg.ifidx = 0;
1945 	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
1946 	tx->flags |= ac << BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
1947 	tx->seg_cnt = 1;
1948 	memcpy(tx->txhdr, mtod(*mp, char *), ETHER_HDR_LEN);
1949 
1950 	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, mp, &pktid, &paddr);
1951 	if (ret) {
1952 		if (ret == ENOBUFS) {
1953 			printf("%s: no pktid available for TX\n",
1954 			    DEVNAME(sc));
1955 			sc->sc_tx_pkts_full = 1;
1956 		}
1957 		bwfm_pci_ring_write_cancel(sc, ring, 1);
1958 		return ret;
1959 	}
1960 	devaddr = paddr + ETHER_HDR_LEN;
1961 
1962 	tx->msg.request_id = htole32(pktid);
1963 	tx->data_len = htole16((*mp)->m_len - ETHER_HDR_LEN);
1964 	tx->data_buf_addr.high_addr = htole32(devaddr >> 32);
1965 	tx->data_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
1966 
1967 	bwfm_pci_ring_write_commit(sc, ring);
1968 	return 0;
1969 }
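
/*
 * Editor's illustration of the deferred-creation handshake described
 * in the comment above:
 *
 *	txdata(m):  lookup fails -> flowring_create(m); report success
 *	txcheck():  a ring is RING_OPENING -> ENOBUFS, queue stalls
 *	completion: dongle acks; ring becomes RING_OPEN, queue restarts
 *	txdata(m'): lookup succeeds -> frame posted on the new ring
 *
 * The first mbuf is stashed in ring->m by the create callback; the
 * completion path (not part of this excerpt) is expected to transmit
 * it once the ring opens.
 */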
1970 
1971 #ifdef BWFM_DEBUG
1972 void
1973 bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
1974 {
1975 	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1976 	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);
1977 
1978 	if (newidx != sc->sc_console_readidx)
1979 		DPRINTFN(3, ("BWFM CONSOLE: "));
1980 	while (newidx != sc->sc_console_readidx) {
1981 		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1982 		    sc->sc_console_buf_addr + sc->sc_console_readidx);
1983 		sc->sc_console_readidx++;
1984 		if (sc->sc_console_readidx == sc->sc_console_buf_size)
1985 			sc->sc_console_readidx = 0;
1986 		if (ch == '\r')
1987 			continue;
1988 		DPRINTFN(3, ("%c", ch));
1989 	}
1990 }
1991 #endif
1992 
1993 int
1994 bwfm_pci_intr(void *v)
1995 {
1996 	struct bwfm_pci_softc *sc = (void *)v;
1997 	uint32_t status;
1998 
1999 	if ((status = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2000 	    BWFM_PCI_PCIE2REG_MAILBOXINT)) == 0)
2001 		return 0;
2002 
2003 	bwfm_pci_intr_disable(sc);
2004 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2005 	    BWFM_PCI_PCIE2REG_MAILBOXINT, status);
2006 
2007 	if (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2008 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1))
2009 		printf("%s: handle MB data\n", __func__);
2010 
2011 	if (status & BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB) {
2012 		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete);
2013 		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete);
2014 		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete);
2015 	}
2016 
2017 #ifdef BWFM_DEBUG
2018 	bwfm_pci_debug_console(sc);
2019 #endif
2020 
2021 	bwfm_pci_intr_enable(sc);
2022 	return 1;
2023 }
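
/*
 * Note (editor's addition): the handler above follows the usual
 * level-triggered discipline of mask, acknowledge, process, unmask.
 * Because the latched MAILBOXINT bits are acknowledged before the
 * rings are drained, a doorbell that fires mid-processing latches
 * again and re-raises the interrupt instead of being lost.
 */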
2024 
2025 void
2026 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
2027 {
2028 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2029 	    BWFM_PCI_PCIE2REG_MAILBOXMASK,
2030 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2031 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
2032 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2033 }
2034 
2035 void
2036 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
2037 {
2038 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2039 	    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
2040 }
2041 
2042 /* Msgbuf protocol implementation */
2043 int
2044 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
2045     int cmd, char *buf, size_t *len)
2046 {
2047 	struct bwfm_pci_softc *sc = (void *)bwfm;
2048 	struct msgbuf_ioctl_req_hdr *req;
2049 	struct mbuf *m;
2050 	size_t buflen;
2051 	int s;
2052 
2053 	s = splnet();
2054 	sc->sc_ioctl_resp_pktid = -1;
2055 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
2056 	if (req == NULL) {
2057 		printf("%s: cannot reserve for write\n", DEVNAME(sc));
2058 		splx(s);
2059 		return 1;
2060 	}
2061 	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
2062 	req->msg.ifidx = 0;
2063 	req->msg.flags = 0;
2064 	req->msg.request_id = htole32(MSGBUF_IOCTL_REQ_PKTID);
2065 	req->cmd = htole32(cmd);
2066 	req->output_buf_len = htole16(*len);
2067 	req->trans_id = htole16(sc->sc_ioctl_reqid++);
2068 
2069 	buflen = uimin(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
2070 	req->input_buf_len = htole16(buflen);
2071 	req->req_buf_addr.high_addr =
2072 	    htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) >> 32);
2073 	req->req_buf_addr.low_addr =
2074 	    htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) & 0xffffffff);
2075 	if (buf)
2076 		memcpy(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), buf, buflen);
2077 	else
2078 		memset(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), 0, buflen);
2079 
2080 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
2081 	splx(s);
2082 
2083 	if (tsleep(&sc->sc_ioctl_buf, PCATCH, "bwfm", hz)) {
2084 		printf("%s: timeout waiting for ioctl response\n",
2085 		    DEVNAME(sc));
2086 		return 1;
2087 	}
2088 
	s = splnet();
	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts, sc->sc_ioctl_resp_pktid);
	if (m == NULL) {
		splx(s);
		return 1;
	}

	*len = uimin(buflen, sc->sc_ioctl_resp_ret_len);
	if (buf)
		memcpy(buf, mtod(m, char *), *len);
	m_freem(m);
	splx(s);
2098 
2099 	return 0;
2100 }
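
/*
 * Sketch of the completion side this function sleeps on (editor's
 * illustration; the real handler lives in the ring-RX path, outside
 * this excerpt).  On an ioctl response, the handler is expected to
 * record the pktid and length, then wake the sleeper:
 *
 *	sc->sc_ioctl_resp_pktid = letoh32(resp->msg.request_id);
 *	sc->sc_ioctl_resp_ret_len = letoh16(resp->resp_len);
 *	wakeup(&sc->sc_ioctl_buf);
 *
 * The response field names are assumptions inferred from the
 * sc_ioctl_* softc members used above.
 */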
2101 
2102 int
2103 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
2104     int cmd, char *buf, size_t len)
2105 {
2106 	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
2107 }
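
/*
 * Usage sketch (editor's addition): the bus-independent bwfm layer
 * drives the dongle through these hooks.  A 4-byte integer query
 * might look like this, assuming a command code such as
 * BWFM_C_GET_VERSION from bwfmreg.h:
 *
 *	uint32_t ver;
 *	size_t len = sizeof(ver);
 *	if (bwfm_pci_msgbuf_query_dcmd(&sc->sc_sc, 0,
 *	    BWFM_C_GET_VERSION, (char *)&ver, &len) == 0)
 *		printf("dongle version %u\n", letoh32(ver));
 */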
2108