xref: /openbsd-src/sys/dev/pci/if_bwfm_pci.c (revision de8cc8edbc71bd3e3bc7fbffa27ba0e564c37d8b)
1 /*	$OpenBSD: if_bwfm_pci.c,v 1.51 2021/02/26 12:33:59 patrick Exp $	*/
2 /*
3  * Copyright (c) 2010-2016 Broadcom Corporation
4  * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and/or distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bpfilter.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/buf.h>
24 #include <sys/kernel.h>
25 #include <sys/malloc.h>
26 #include <sys/device.h>
27 #include <sys/queue.h>
28 #include <sys/socket.h>
29 
30 #if defined(__HAVE_FDT)
31 #include <machine/fdt.h>
32 #include <dev/ofw/openfirm.h>
33 #endif
34 
35 #if NBPFILTER > 0
36 #include <net/bpf.h>
37 #endif
38 #include <net/if.h>
39 #include <net/if_dl.h>
40 #include <net/if_media.h>
41 
42 #include <netinet/in.h>
43 #include <netinet/if_ether.h>
44 
45 #include <net80211/ieee80211_var.h>
46 
47 #include <machine/bus.h>
48 
49 #include <dev/pci/pcireg.h>
50 #include <dev/pci/pcivar.h>
51 #include <dev/pci/pcidevs.h>
52 
53 #include <dev/ic/bwfmvar.h>
54 #include <dev/ic/bwfmreg.h>
55 #include <dev/pci/if_bwfm_pci.h>
56 
/* Sizes of the DMA buffers handed to the firmware. */
#define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
#define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
#define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN

/* Static rings: submission rings beyond these are TX flowrings. */
#define BWFM_NUM_TX_MSGRINGS			2
#define BWFM_NUM_RX_MSGRINGS			3

/* Number of packet ids (and thus DMA maps) per traffic class. */
#define BWFM_NUM_IOCTL_PKTIDS			8
#define BWFM_NUM_TX_PKTIDS			2048
#define BWFM_NUM_RX_PKTIDS			1024

/* Maximum DMA segments per packet buffer (bus_dmamap_create nsegments). */
#define BWFM_NUM_IOCTL_DESCS			1
#define BWFM_NUM_TX_DESCS			1
#define BWFM_NUM_RX_DESCS			1

#ifdef BWFM_DEBUG
#define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
static int bwfm_debug = 2;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#define DEVNAME(sc)	((sc)->sc_sc.sc_dev.dv_xname)
82 
/*
 * Life-cycle states of a message ring; flowrings are created and
 * deleted at runtime (see bwfm_pci_flowring_create/_delete).
 */
enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};
89 
/*
 * One msgbuf message ring shared with the firmware.  Submission
 * rings carry host-to-device messages, completion rings carry
 * device-to-host messages.
 */
struct bwfm_pci_msgring {
	uint32_t		 w_idx_addr;	/* offset of write index (TCM or DMA idx buf) */
	uint32_t		 r_idx_addr;	/* offset of read index */
	uint32_t		 w_ptr;		/* cached write pointer */
	uint32_t		 r_ptr;		/* cached read pointer */
	int			 nitem;		/* number of slots in the ring */
	int			 itemsz;	/* size of one slot in bytes */
	enum ring_status	 status;
	struct bwfm_pci_dmamem	*ring;		/* ring storage */
	struct mbuf		*m;

	/* Flowring-only state. */
	int			 fifo;
	uint8_t			 mac[ETHER_ADDR_LEN];
};
104 
/*
 * A received ioctl response, queued on sc_ioctlq until the caller
 * with the matching transaction id picks it up.
 */
struct bwfm_pci_ioctl {
	uint16_t		 transid;	/* transaction id */
	uint16_t		 retlen;	/* length of returned data */
	int16_t			 status;	/* firmware status code */
	struct mbuf		*m;		/* response payload */
	TAILQ_ENTRY(bwfm_pci_ioctl) next;
};
112 
/* One packet buffer slot: its DMA map and the mbuf loaded into it. */
struct bwfm_pci_buf {
	bus_dmamap_t	 bb_map;
	struct mbuf	*bb_m;
};
117 
/* Array of packet buffer slots, mapping packet ids to mbufs. */
struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;	/* npkt slots */
	uint32_t		 npkt;	/* number of slots */
	int			 last;	/* last slot handed out */
};
123 
/*
 * PCI bus glue state; embeds the common bwfm(4) softc, which must
 * stay first so the structs can be cast into each other.
 */
struct bwfm_pci_softc {
	struct bwfm_softc	 sc_sc;		/* common bwfm state; must be first */
	pci_chipset_tag_t	 sc_pc;
	pcitag_t		 sc_tag;
	pcireg_t		 sc_id;
	void 			*sc_ih;		/* interrupt handler cookie */

	int			 sc_initialized;	/* preinit has completed */

	bus_space_tag_t		 sc_reg_iot;	/* BAR0: PCIe core registers */
	bus_space_handle_t	 sc_reg_ioh;
	bus_size_t		 sc_reg_ios;

	bus_space_tag_t		 sc_tcm_iot;	/* BAR1: device internal memory (TCM) */
	bus_space_handle_t	 sc_tcm_ioh;
	bus_size_t		 sc_tcm_ios;

	bus_dma_tag_t		 sc_dmat;

	/* Firmware/host shared structure, located at the end of RAM. */
	uint32_t		 sc_shared_address;
	uint32_t		 sc_shared_flags;
	uint8_t			 sc_shared_version;

	/* Ring indices in host DMA memory (0 = indices kept in TCM). */
	uint8_t			 sc_dma_idx_sz;
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
	size_t			 sc_dma_idx_bufsz;

	uint16_t		 sc_max_rxbufpost;	/* RX buffers to keep posted */
	uint32_t		 sc_rx_dataoffset;	/* data offset in RX packets */
	uint32_t		 sc_htod_mb_data_addr;	/* host-to-device mailbox */
	uint32_t		 sc_dtoh_mb_data_addr;	/* device-to-host mailbox */
	uint32_t		 sc_ring_info_addr;	/* ring info structure in TCM */

	/* Firmware console ("dmesg") location in TCM. */
	uint32_t		 sc_console_base_addr;
	uint32_t		 sc_console_buf_addr;
	uint32_t		 sc_console_buf_size;
	uint32_t		 sc_console_readidx;

	uint16_t		 sc_max_flowrings;
	uint16_t		 sc_max_submissionrings;
	uint16_t		 sc_max_completionrings;

	/* Static rings plus dynamically created TX flowrings. */
	struct bwfm_pci_msgring	 sc_ctrl_submit;
	struct bwfm_pci_msgring	 sc_rxpost_submit;
	struct bwfm_pci_msgring	 sc_ctrl_complete;
	struct bwfm_pci_msgring	 sc_tx_complete;
	struct bwfm_pci_msgring	 sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;

	struct bwfm_pci_dmamem	*sc_scratch_buf;	/* firmware scratch space */
	struct bwfm_pci_dmamem	*sc_ringupd_buf;	/* firmware ring update space */

	TAILQ_HEAD(, bwfm_pci_ioctl) sc_ioctlq;		/* pending ioctl responses */
	uint16_t		 sc_ioctl_transid;

	struct if_rxring	 sc_ioctl_ring;
	struct if_rxring	 sc_event_ring;
	struct if_rxring	 sc_rxbuf_ring;

	/* Packet id maps per traffic class. */
	struct bwfm_pci_pkts	 sc_ioctl_pkts;
	struct bwfm_pci_pkts	 sc_rx_pkts;
	struct bwfm_pci_pkts	 sc_tx_pkts;
	int			 sc_tx_pkts_full;	/* no free TX packet ids */
};
188 
/* A single-segment DMA allocation: map, segment, size, kernel VA. */
struct bwfm_pci_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	caddr_t			bdm_kva;
};

/* Accessors: DMA map, length, device (bus) address, kernel address. */
#define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BWFM_PCI_DMA_DVA(_bdm)	((uint64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BWFM_PCI_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)
200 
201 int		 bwfm_pci_match(struct device *, void *, void *);
202 void		 bwfm_pci_attach(struct device *, struct device *, void *);
203 int		 bwfm_pci_detach(struct device *, int);
204 
205 #if defined(__HAVE_FDT)
206 int		 bwfm_pci_read_otp(struct bwfm_pci_softc *);
207 void		 bwfm_pci_process_otp_tuple(struct bwfm_pci_softc *, uint8_t,
208 		    uint8_t, uint8_t *);
209 #endif
210 
211 int		 bwfm_pci_intr(void *);
212 void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
213 void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
214 uint32_t	 bwfm_pci_intr_status(struct bwfm_pci_softc *);
215 void		 bwfm_pci_intr_ack(struct bwfm_pci_softc *, uint32_t);
216 void		 bwfm_pci_hostready(struct bwfm_pci_softc *);
217 int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
218 		    size_t, const u_char *, size_t);
219 void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int );
220 
221 struct bwfm_pci_dmamem *
222 		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
223 		    bus_size_t);
224 void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
225 int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
226 		    struct bwfm_pci_pkts *);
227 int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
228 		    struct bwfm_pci_pkts *, struct mbuf *,
229 		    uint32_t *, paddr_t *);
230 struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
231 		    struct bwfm_pci_pkts *, uint32_t);
232 void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
233 		    struct if_rxring *, uint32_t);
234 void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
235 void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
236 int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
237 		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
238 int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
239 		    int, size_t);
240 
241 void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
242 		    struct bwfm_pci_msgring *);
243 void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
244 		    struct bwfm_pci_msgring *);
245 void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
246 		    struct bwfm_pci_msgring *);
247 void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
248 		    struct bwfm_pci_msgring *);
249 void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
250 		    struct bwfm_pci_msgring *);
251 void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
252 		    struct bwfm_pci_msgring *);
253 void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
254 		    struct bwfm_pci_msgring *, int, int *);
255 void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
256 		    struct bwfm_pci_msgring *, int *);
257 void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
258 		    struct bwfm_pci_msgring *, int);
259 void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
260 		    struct bwfm_pci_msgring *);
261 void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
262 		    struct bwfm_pci_msgring *, int);
263 
264 void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
265 		    struct bwfm_pci_msgring *, struct mbuf_list *);
266 void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *,
267 		    struct mbuf_list *);
268 
269 uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
270 void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
271 		    uint32_t);
272 int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
273 int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
274 void		 bwfm_pci_buscore_activate(struct bwfm_softc *, uint32_t);
275 
276 int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
277 		     struct mbuf *);
278 void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
279 		     struct mbuf *);
280 void		 bwfm_pci_flowring_create_cb(struct bwfm_softc *, void *);
281 void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);
282 
283 int		 bwfm_pci_preinit(struct bwfm_softc *);
284 void		 bwfm_pci_stop(struct bwfm_softc *);
285 int		 bwfm_pci_txcheck(struct bwfm_softc *);
286 int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf *);
287 
288 #ifdef BWFM_DEBUG
289 void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
290 #endif
291 
292 int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
293 		    int, char *, size_t *);
294 int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
295 		    int, char *, size_t);
296 void		 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *,
297 		    struct msgbuf_ioctl_resp_hdr *);
298 
/* Chip backplane register access callbacks used by the common layer. */
struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};
307 
/* Bus-level callbacks; TX control goes through the msgbuf proto instead. */
struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_preinit = bwfm_pci_preinit,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
};
315 
/* msgbuf protocol callbacks; RX is handled via the completion rings. */
struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
	.proto_rx = NULL,
	.proto_rxctl = NULL,
};
322 
/* Autoconf(9) attachment glue. */
struct cfattach bwfm_pci_ca = {
	sizeof(struct bwfm_pci_softc),
	bwfm_pci_match,
	bwfm_pci_attach,
	bwfm_pci_detach,
};
329 
/* PCI IDs of supported Broadcom fullmac chips. */
static const struct pci_matchid bwfm_pci_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4356 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4371 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4378 },
};
337 
338 int
339 bwfm_pci_match(struct device *parent, void *match, void *aux)
340 {
341 	return (pci_matchbyid(aux, bwfm_pci_devices,
342 	    nitems(bwfm_pci_devices)));
343 }
344 
345 void
346 bwfm_pci_attach(struct device *parent, struct device *self, void *aux)
347 {
348 	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
349 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
350 	const char *intrstr;
351 	pci_intr_handle_t ih;
352 
353 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
354 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
355 	    NULL, &sc->sc_tcm_ios, 0)) {
356 		printf(": can't map bar1\n");
357 		return;
358 	}
359 
360 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
361 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
362 	    NULL, &sc->sc_reg_ios, 0)) {
363 		printf(": can't map bar0\n");
364 		goto bar1;
365 	}
366 
367 	sc->sc_pc = pa->pa_pc;
368 	sc->sc_tag = pa->pa_tag;
369 	sc->sc_id = pa->pa_id;
370 	sc->sc_dmat = pa->pa_dmat;
371 
372 	/* Map and establish the interrupt. */
373 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
374 		printf(": couldn't map interrupt\n");
375 		goto bar0;
376 	}
377 	intrstr = pci_intr_string(pa->pa_pc, ih);
378 
379 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
380 	    bwfm_pci_intr, sc, DEVNAME(sc));
381 	if (sc->sc_ih == NULL) {
382 		printf(": couldn't establish interrupt");
383 		if (intrstr != NULL)
384 			printf(" at %s", intrstr);
385 		printf("\n");
386 		goto bar1;
387 	}
388 	printf(": %s\n", intrstr);
389 
390 	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
391 	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
392 	bwfm_attach(&sc->sc_sc);
393 	config_mountroot(self, bwfm_attachhook);
394 	return;
395 
396 bar0:
397 	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
398 bar1:
399 	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
400 }
401 
402 int
403 bwfm_pci_preinit(struct bwfm_softc *bwfm)
404 {
405 	struct bwfm_pci_softc *sc = (void *)bwfm;
406 	struct bwfm_pci_ringinfo ringinfo;
407 	const char *chip = NULL;
408 	u_char *ucode, *nvram;
409 	size_t size, nvsize, nvlen;
410 	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
411 	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
412 	uint32_t idx_offset, reg;
413 	int i;
414 
415 	if (sc->sc_initialized)
416 		return 0;
417 
418 	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
419 	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
420 		printf("%s: cannot attach chip\n", DEVNAME(sc));
421 		return 1;
422 	}
423 
424 #if defined(__HAVE_FDT)
425 	if (bwfm_pci_read_otp(sc)) {
426 		printf("%s: cannot read OTP\n", DEVNAME(sc));
427 		return 1;
428 	}
429 #endif
430 
431 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
432 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
433 	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
434 	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
435 	    BWFM_PCI_PCIE2REG_CONFIGDATA);
436 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
437 	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
438 
439 	switch (bwfm->sc_chip.ch_chip)
440 	{
441 	case BRCM_CC_4350_CHIP_ID:
442 		if (bwfm->sc_chip.ch_chiprev > 7)
443 			chip = "4350";
444 		else
445 			chip = "4350c2";
446 		break;
447 	case BRCM_CC_4356_CHIP_ID:
448 		chip = "4356";
449 		break;
450 	case BRCM_CC_43602_CHIP_ID:
451 		chip = "43602";
452 		break;
453 	case BRCM_CC_4371_CHIP_ID:
454 		chip = "4371";
455 		break;
456 	case BRCM_CC_4378_CHIP_ID:
457 		chip = "4378";
458 		break;
459 	default:
460 		printf("%s: unknown firmware for chip %s\n",
461 		    DEVNAME(sc), bwfm->sc_chip.ch_name);
462 		return 1;
463 	}
464 
465 	if (bwfm_loadfirmware(bwfm, chip, "-pcie", &ucode, &size,
466 	    &nvram, &nvsize, &nvlen) != 0)
467 		return 1;
468 
469 	/* Retrieve RAM size from firmware. */
470 	if (size >= BWFM_RAMSIZE + 8) {
471 		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
472 		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
473 			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
474 	}
475 
476 	if (bwfm_pci_load_microcode(sc, ucode, size, nvram, nvlen) != 0) {
477 		printf("%s: could not load microcode\n",
478 		    DEVNAME(sc));
479 		free(ucode, M_DEVBUF, size);
480 		free(nvram, M_DEVBUF, nvsize);
481 		return 1;
482 	}
483 	free(ucode, M_DEVBUF, size);
484 	free(nvram, M_DEVBUF, nvsize);
485 
486 	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
487 	    sc->sc_shared_address + BWFM_SHARED_INFO);
488 	sc->sc_shared_version = sc->sc_shared_flags;
489 	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
490 	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
491 		printf("%s: PCIe version %d unsupported\n",
492 		    DEVNAME(sc), sc->sc_shared_version);
493 		return 1;
494 	}
495 
496 	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
497 		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
498 			sc->sc_dma_idx_sz = sizeof(uint16_t);
499 		else
500 			sc->sc_dma_idx_sz = sizeof(uint32_t);
501 	}
502 
503 	/* Maximum RX data buffers in the ring. */
504 	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
505 	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
506 	if (sc->sc_max_rxbufpost == 0)
507 		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;
508 
509 	/* Alternative offset of data in a packet */
510 	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
511 	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);
512 
513 	/* For Power Management */
514 	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
515 	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
516 	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
517 	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);
518 
519 	/* Ring information */
520 	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
521 	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);
522 
523 	/* Firmware's "dmesg" */
524 	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
525 	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
526 	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
527 	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
528 	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
529 	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);
530 
531 	/* Read ring information. */
532 	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
533 	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
534 
535 	if (sc->sc_shared_version >= 6) {
536 		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
537 		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
538 		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
539 	} else {
540 		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
541 		sc->sc_max_flowrings = sc->sc_max_submissionrings -
542 		    BWFM_NUM_TX_MSGRINGS;
543 		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
544 	}
545 
546 	if (sc->sc_dma_idx_sz == 0) {
547 		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
548 		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
549 		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
550 		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
551 		idx_offset = sizeof(uint32_t);
552 	} else {
553 		uint64_t address;
554 
555 		/* Each TX/RX Ring has a Read and Write Ptr */
556 		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
557 		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
558 		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
559 		    sc->sc_dma_idx_bufsz, 8);
560 		if (sc->sc_dma_idx_buf == NULL) {
561 			/* XXX: Fallback to TCM? */
562 			printf("%s: cannot allocate idx buf\n",
563 			    DEVNAME(sc));
564 			return 1;
565 		}
566 
567 		idx_offset = sc->sc_dma_idx_sz;
568 		h2d_w_idx_ptr = 0;
569 		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
570 		ringinfo.h2d_w_idx_hostaddr_low =
571 		    htole32(address & 0xffffffff);
572 		ringinfo.h2d_w_idx_hostaddr_high =
573 		    htole32(address >> 32);
574 
575 		h2d_r_idx_ptr = h2d_w_idx_ptr +
576 		    sc->sc_max_submissionrings * idx_offset;
577 		address += sc->sc_max_submissionrings * idx_offset;
578 		ringinfo.h2d_r_idx_hostaddr_low =
579 		    htole32(address & 0xffffffff);
580 		ringinfo.h2d_r_idx_hostaddr_high =
581 		    htole32(address >> 32);
582 
583 		d2h_w_idx_ptr = h2d_r_idx_ptr +
584 		    sc->sc_max_submissionrings * idx_offset;
585 		address += sc->sc_max_submissionrings * idx_offset;
586 		ringinfo.d2h_w_idx_hostaddr_low =
587 		    htole32(address & 0xffffffff);
588 		ringinfo.d2h_w_idx_hostaddr_high =
589 		    htole32(address >> 32);
590 
591 		d2h_r_idx_ptr = d2h_w_idx_ptr +
592 		    sc->sc_max_completionrings * idx_offset;
593 		address += sc->sc_max_completionrings * idx_offset;
594 		ringinfo.d2h_r_idx_hostaddr_low =
595 		    htole32(address & 0xffffffff);
596 		ringinfo.d2h_r_idx_hostaddr_high =
597 		    htole32(address >> 32);
598 
599 		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
600 		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
601 	}
602 
603 	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
604 	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
605 	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
606 	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
607 	    &ring_mem_ptr))
608 		goto cleanup;
609 	/* TX rxpost ring: Send clean data mbufs for RX */
610 	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
611 	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
612 	    &ring_mem_ptr))
613 		goto cleanup;
614 	/* RX completion rings: recv our filled buffers back */
615 	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
616 	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
617 	    &ring_mem_ptr))
618 		goto cleanup;
619 	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024,
620 	    sc->sc_shared_version >= 7 ? 24 : 16,
621 	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
622 	    &ring_mem_ptr))
623 		goto cleanup;
624 	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512,
625 	    sc->sc_shared_version >= 7 ? 40 : 32,
626 	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
627 	    &ring_mem_ptr))
628 		goto cleanup;
629 
630 	/* Dynamic TX rings for actual data */
631 	sc->sc_flowrings = malloc(sc->sc_max_flowrings *
632 	    sizeof(struct bwfm_pci_msgring), M_DEVBUF, M_WAITOK | M_ZERO);
633 	for (i = 0; i < sc->sc_max_flowrings; i++) {
634 		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
635 		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
636 		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
637 	}
638 
639 	/* Scratch and ring update buffers for firmware */
640 	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
641 	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
642 		goto cleanup;
643 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
644 	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
645 	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
646 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
647 	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
648 	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
649 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
650 	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
651 	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);
652 
653 	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
654 	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
655 		goto cleanup;
656 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
657 	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
658 	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
659 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
660 	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
661 	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
662 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
663 	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
664 	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);
665 
666 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
667 	bwfm_pci_intr_enable(sc);
668 	bwfm_pci_hostready(sc);
669 
670 	/* Maps RX mbufs to a packet id and back. */
671 	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
672 	sc->sc_rx_pkts.pkts = malloc(BWFM_NUM_RX_PKTIDS *
673 	    sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
674 	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
675 		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_CTL_PKT_SIZE,
676 		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_CTL_PKT_SIZE, 0, BUS_DMA_WAITOK,
677 		    &sc->sc_rx_pkts.pkts[i].bb_map);
678 
679 	/* Maps TX mbufs to a packet id and back. */
680 	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
681 	sc->sc_tx_pkts.pkts = malloc(BWFM_NUM_TX_PKTIDS
682 	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
683 	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
684 		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
685 		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
686 		    &sc->sc_tx_pkts.pkts[i].bb_map);
687 
688 	/* Maps IOCTL mbufs to a packet id and back. */
689 	sc->sc_ioctl_pkts.npkt = BWFM_NUM_IOCTL_PKTIDS;
690 	sc->sc_ioctl_pkts.pkts = malloc(BWFM_NUM_IOCTL_PKTIDS
691 	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
692 	for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS; i++)
693 		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
694 		    BWFM_NUM_IOCTL_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
695 		    &sc->sc_ioctl_pkts.pkts[i].bb_map);
696 
697 	/*
698 	 * For whatever reason, could also be a bug somewhere in this
699 	 * driver, the firmware needs a bunch of RX buffers otherwise
700 	 * it won't send any RX complete messages.
701 	 */
702 	if_rxr_init(&sc->sc_rxbuf_ring, min(256, sc->sc_max_rxbufpost),
703 	    sc->sc_max_rxbufpost);
704 	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
705 	if_rxr_init(&sc->sc_event_ring, 8, 8);
706 	bwfm_pci_fill_rx_rings(sc);
707 
708 	TAILQ_INIT(&sc->sc_ioctlq);
709 
710 #ifdef BWFM_DEBUG
711 	sc->sc_console_readidx = 0;
712 	bwfm_pci_debug_console(sc);
713 #endif
714 
715 	sc->sc_initialized = 1;
716 	return 0;
717 
718 cleanup:
719 	if (sc->sc_ringupd_buf)
720 		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
721 	if (sc->sc_scratch_buf)
722 		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
723 	if (sc->sc_rx_complete.ring)
724 		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
725 	if (sc->sc_tx_complete.ring)
726 		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
727 	if (sc->sc_ctrl_complete.ring)
728 		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
729 	if (sc->sc_rxpost_submit.ring)
730 		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
731 	if (sc->sc_ctrl_submit.ring)
732 		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
733 	if (sc->sc_dma_idx_buf)
734 		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
735 	return 1;
736 }
737 
/*
 * Copy the firmware image (and NVRAM, placed at the end of RAM) into
 * device memory and kick the core.  The last word of RAM is zeroed
 * first; the running firmware overwrites it with a pointer to the
 * shared structure, which is saved in sc_shared_address.  Returns 0
 * on success, 1 on failure.
 */
int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size,
    const u_char *nvram, size_t nvlen)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t shared, written;
	int i;

	/* BCM43602: select RAM banks 5 and 7 and clear their PDA registers. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	/* Copy the firmware image to the start of device RAM. */
	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	/* NVRAM goes at the very end of RAM. */
	if (nvram) {
		for (i = 0; i < nvlen; i++)
			bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
			    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize
			    - nvlen  + i, nvram[i]);
	}

	/* Remember the marker value so we can detect the overwrite. */
	written = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(uint32_t *)ucode);

	/* Poll up to 100 * 50ms (5s) for the firmware to come up. */
	for (i = 0; i < 100; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared != written)
			break;
	}
	if (shared == written) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}
	/* The shared structure pointer must lie within device RAM. */
	if (shared < bwfm->sc_chip.ch_rambase ||
	    shared >= bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize) {
		printf("%s: invalid shared RAM address 0x%08x\n", DEVNAME(sc),
		    shared);
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}
805 
806 int
807 bwfm_pci_detach(struct device *self, int flags)
808 {
809 	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
810 
811 	bwfm_detach(&sc->sc_sc, flags);
812 
813 	/* FIXME: free RX buffers */
814 	/* FIXME: free TX buffers */
815 	/* FIXME: free more memory */
816 
817 	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
818 	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
819 	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
820 	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
821 	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
822 	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
823 	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
824 	bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
825 	return 0;
826 }
827 
828 #if defined(__HAVE_FDT)
829 int
830 bwfm_pci_read_otp(struct bwfm_pci_softc *sc)
831 {
832 	struct bwfm_softc *bwfm = (void *)sc;
833 	struct bwfm_core *core;
834 	uint8_t otp[BWFM_OTP_SIZE];
835 	int i;
836 
837 	if (bwfm->sc_chip.ch_chip != BRCM_CC_4378_CHIP_ID)
838 		return 0;
839 
840 	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_GCI);
841 	if (core == NULL)
842 		return 1;
843 
844 	for (i = 0; i < (sizeof(otp) / sizeof(uint32_t)); i++)
845 		((uint32_t *)otp)[i] = bwfm_pci_buscore_read(bwfm,
846 		    core->co_base + BWFM_OTP_4378_BASE + i * sizeof(uint32_t));
847 
848 	for (i = 0; i < BWFM_OTP_SIZE - 1; ) {
849 		if (otp[i + 0] == 0) {
850 			i++;
851 			continue;
852 		}
853 		if (i + otp[i + 1] > BWFM_OTP_SIZE)
854 			break;
855 		bwfm_pci_process_otp_tuple(sc, otp[i + 0], otp[i + 1],
856 		    &otp[i + 2]);
857 		i += otp[i + 1];
858 	}
859 
860 	return 0;
861 }
862 
/*
 * Parse one OTP tuple.  Type 0x15 ("system vendor OTP") carries a
 * list of "X=value" strings describing the module; these are combined
 * with the device tree's "module-wlan0" property and printed as the
 * firmware identification string.
 */
void
bwfm_pci_process_otp_tuple(struct bwfm_pci_softc *sc, uint8_t type, uint8_t size,
    uint8_t *data)
{
	struct bwfm_softc *bwfm = (void *)sc;
	char chiprev[8] = "", module[8] = "", modrev[8] = "", vendor[8] = "", chip[8] = "";
	char product[16] = "unknown";
	int node, len;

	switch (type) {
	case 0x15: /* system vendor OTP */
		DPRINTF(("%s: system vendor OTP\n", DEVNAME(sc)));
		/* The payload must start with a fixed 08 00 00 00 header. */
		if (size < sizeof(uint32_t))
			return;
		if (data[0] != 0x08 || data[1] != 0x00 ||
		    data[2] != 0x00 || data[3] != 0x00)
			return;
		size -= sizeof(uint32_t);
		data += sizeof(uint32_t);
		while (size) {
			/* reached end */
			if (data[0] == 0xff)
				break;
			/* Token ends at NUL, space or 0xff. */
			for (len = 0; len < size; len++)
				if (data[len] == 0x00 || data[len] == ' ' ||
				    data[len] == 0xff)
					break;
			if (len < 3 || len > 9) /* X=abcdef */
				goto next;
			if (data[1] != '=')
				goto next;
			/* NULL-terminate string */
			if (data[len] == ' ')
				data[len] = '\0';
			/* Stash the value by its single-letter key. */
			switch (data[0]) {
			case 's':	/* chip revision */
				strlcpy(chiprev, &data[2], sizeof(chiprev));
				break;
			case 'M':	/* module */
				strlcpy(module, &data[2], sizeof(module));
				break;
			case 'm':	/* module revision */
				strlcpy(modrev, &data[2], sizeof(modrev));
				break;
			case 'V':	/* vendor */
				strlcpy(vendor, &data[2], sizeof(vendor));
				break;
			}
next:
			/* skip content */
			data += len;
			size -= len;
			/* skip spacer tag */
			if (size) {
				data++;
				size--;
			}
		}
		/* Chips numbered above 40000 are printed decimal, else hex. */
		snprintf(chip, sizeof(chip),
		    bwfm->sc_chip.ch_chip > 40000 ? "%05d" : "%04x",
		    bwfm->sc_chip.ch_chip);
		node = OF_finddevice("/chosen");
		if (node != -1)
			OF_getprop(node, "module-wlan0", product, sizeof(product));
		printf("%s: firmware C-%s%s%s/P-%s_M-%s_V-%s__m-%s\n",
		    DEVNAME(sc), chip,
		    *chiprev ? "__s-" : "", *chiprev ? chiprev : "",
		    product, module, vendor, modrev);
		break;
	case 0x80: /* Broadcom CIS */
		DPRINTF(("%s: Broadcom CIS\n", DEVNAME(sc)));
		break;
	default:
		DPRINTF(("%s: unknown OTP tuple\n", DEVNAME(sc)));
		break;
	}
}
940 #endif
941 
942 /* DMA code */
/*
 * Allocate a zeroed, single-segment DMA buffer of `size' bytes with
 * the given alignment, map it into kernel VA and load it into a DMA
 * map so both the host and the device can address it.  Returns NULL
 * on failure; resources are unwound in reverse order of acquisition.
 */
struct bwfm_pci_dmamem *
bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
{
	struct bwfm_pci_dmamem *bdm;
	int nsegs;

	bdm = malloc(sizeof(*bdm), M_DEVBUF, M_WAITOK | M_ZERO);
	bdm->bdm_size = size;

	/* One segment only, so the device gets a single base address. */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
		goto bdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
	    &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(bdm->bdm_kva, size);

	return (bdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
bdmfree:
	free(bdm, M_DEVBUF, sizeof(*bdm));

	return (NULL);
}
983 
/*
 * Release a DMA buffer obtained from bwfm_pci_dmamem_alloc().
 */
void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	free(bdm, M_DEVBUF, sizeof(*bdm));
}
992 
993 /*
994  * We need a simple mapping from a packet ID to mbufs, because when
995  * a transfer completed, we only know the ID so we have to look up
996  * the memory for the ID.  This simply looks for an empty slot.
997  */
998 int
999 bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
1000 {
1001 	int i, idx;
1002 
1003 	idx = pkts->last + 1;
1004 	for (i = 0; i < pkts->npkt; i++) {
1005 		if (idx == pkts->npkt)
1006 			idx = 0;
1007 		if (pkts->pkts[idx].bb_m == NULL)
1008 			return 0;
1009 		idx++;
1010 	}
1011 	return ENOBUFS;
1012 }
1013 
/*
 * Store an mbuf in the first free packet slot after the last one used
 * and return its slot index (the packet ID) and DMA address.  The
 * mbuf is DMA-loaded into the slot's map; on a first failure it is
 * defragmented once and retried.  Returns 0 on success, EFBIG if the
 * mbuf cannot be mapped, or ENOBUFS if all slots are in use.
 */
int
bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    struct mbuf *m, uint32_t *pktid, paddr_t *paddr)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat,
			    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0) {
				/* Too many segments; compact and retry once. */
				if (m_defrag(m, M_DONTWAIT))
					return EFBIG;
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0)
					return EFBIG;
			}
			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
			    0, pkts->pkts[idx].bb_map->dm_mapsize,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			pkts->last = idx;
			pkts->pkts[idx].bb_m = m;
			*pktid = idx;
			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
			return 0;
		}
		idx++;
	}
	return ENOBUFS;
}
1046 
/*
 * Look up the mbuf stored under a packet ID, unload its DMA map and
 * clear the slot.  Returns the mbuf, or NULL if the ID is out of
 * range or the slot is empty.
 */
struct mbuf *
bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    uint32_t pktid)
{
	struct mbuf *m;

	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
	    pkts->pkts[pktid].bb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
	m = pkts->pkts[pktid].bb_m;
	pkts->pkts[pktid].bb_m = NULL;
	return m;
}
1063 
/*
 * Replenish every RX buffer pool: data buffers for received frames
 * plus the ioctl-response and event buffers posted via the control
 * submit ring.
 */
void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}
1073 
/*
 * Post control-path RX buffers (ioctl responses or events, depending
 * on `msgtype') to the firmware through the control submit ring, up
 * to 8 at a time as accounted by `rxring'.  Stops early when packet
 * IDs, ring slots or mbuf clusters run out; partially reserved slots
 * are rolled back.
 */
void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_CTL_PKT_SIZE);
		if (m == NULL) {
			/* Give the reserved descriptor back. */
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_CTL_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_CTL_PKT_SIZE);
		req->host_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->host_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	/* Return unused slot credits to the rxring accounting. */
	if_rxr_put(rxring, slots);
	splx(s);
}
1113 
/*
 * Post data RX buffers to the firmware through the rxpost submit
 * ring, bounded by sc_max_rxbufpost via the rxbuf rxring accounting.
 * Stops early when packet IDs, ring slots or mbuf clusters run out;
 * partially reserved slots are rolled back.
 */
void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			/* Give the reserved descriptor back. */
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	/* Return unused slot credits to the rxring accounting. */
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}
1153 
/*
 * Set up one of the firmware-defined common message rings: record the
 * device-memory locations of its read/write indices, reset both
 * indices, allocate the ring's DMA memory and describe it (base
 * address, item count, item size) in the ring mem block in TCM.
 * `*ring_mem' is advanced past the descriptor just written so rings
 * can be set up back to back.  Returns 0 on success or ENOMEM.
 */
int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}
1182 
/*
 * Set up a host-created TX flowring: reset its cached pointers and
 * allocate its DMA memory.  The ring is announced to the firmware
 * separately via a flowring create request.  Returns 0 or ENOMEM.
 */
int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}
1199 
1200 /* Ring helpers */
1201 void
1202 bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
1203     struct bwfm_pci_msgring *ring)
1204 {
1205 	struct bwfm_softc *bwfm = (void *)sc;
1206 
1207 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
1208 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1209 		    BWFM_PCI_64_PCIE2REG_H2D_MAILBOX_0, 1);
1210 	else
1211 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1212 		    BWFM_PCI_PCIE2REG_H2D_MAILBOX_0, 1);
1213 }
1214 
/*
 * Refresh the cached read pointer from the device: directly from TCM,
 * or from the host-resident DMA index buffer when the firmware
 * publishes indices via DMA (sc_dma_idx_sz != 0).
 */
void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}
1230 
/*
 * Refresh the cached write pointer from the device: directly from
 * TCM, or from the host-resident DMA index buffer when the firmware
 * publishes indices via DMA (sc_dma_idx_sz != 0).
 */
void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}
1246 
/*
 * Publish the cached read pointer to the device: directly into TCM,
 * or into the DMA index buffer (followed by a sync) when the firmware
 * reads indices via DMA.
 */
void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1262 
/*
 * Publish the cached write pointer to the device: directly into TCM,
 * or into the DMA index buffer (followed by a sync) when the firmware
 * reads indices via DMA.
 */
void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1278 
1279 /*
1280  * Retrieve a free descriptor to put new stuff in, but don't commit
1281  * to it yet so we can rollback later if any error occurs.
1282  */
1283 void *
1284 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
1285     struct bwfm_pci_msgring *ring)
1286 {
1287 	int available;
1288 	char *ret;
1289 
1290 	bwfm_pci_ring_update_rptr(sc, ring);
1291 
1292 	if (ring->r_ptr > ring->w_ptr)
1293 		available = ring->r_ptr - ring->w_ptr;
1294 	else
1295 		available = ring->r_ptr + (ring->nitem - ring->w_ptr);
1296 
1297 	if (available <= 1)
1298 		return NULL;
1299 
1300 	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
1301 	ring->w_ptr += 1;
1302 	if (ring->w_ptr == ring->nitem)
1303 		ring->w_ptr = 0;
1304 	return ret;
1305 }
1306 
/*
 * Like bwfm_pci_ring_write_reserve(), but reserve up to `count'
 * contiguous descriptors.  The actual number reserved is returned in
 * `*avail'; a run never wraps past the end of the ring, so callers
 * may get fewer descriptors than requested.
 */
void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	/* Keep one slot free to distinguish full from empty. */
	if (available <= 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	*avail = min(count, available - 1);
	/* Clamp the run at the end of the ring; no wrap-around. */
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}
1333 
1334 /*
1335  * Read number of descriptors available (submitted by the firmware)
1336  * and retrieve pointer to first descriptor.
1337  */
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	bwfm_pci_ring_update_wptr(sc, ring);

	/*
	 * Count only up to the end of the ring; wrapped descriptors are
	 * picked up by the caller in a second pass.
	 */
	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

	if (*avail == 0)
		return NULL;

	/* Make the firmware-written descriptors visible to the CPU. */
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}
1357 
1358 /*
1359  * Let firmware know we read N descriptors.
1360  */
1361 void
1362 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
1363     struct bwfm_pci_msgring *ring, int nitem)
1364 {
1365 	ring->r_ptr += nitem;
1366 	if (ring->r_ptr == ring->nitem)
1367 		ring->r_ptr = 0;
1368 	bwfm_pci_ring_write_rptr(sc, ring);
1369 }
1370 
1371 /*
1372  * Let firmware know that we submitted some descriptors.
1373  */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	/* Flush the descriptors to the device before ringing the bell. */
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}
1384 
1385 /*
1386  * Rollback N descriptors in case we don't actually want
1387  * to commit to it.
1388  */
1389 void
1390 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
1391     struct bwfm_pci_msgring *ring, int nitem)
1392 {
1393 	if (ring->w_ptr == 0)
1394 		ring->w_ptr = ring->nitem - nitem;
1395 	else
1396 		ring->w_ptr -= nitem;
1397 }
1398 
1399 /*
1400  * Foreach written descriptor on the ring, pass the descriptor to
1401  * a message handler and let the firmware know we handled it.
1402  */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    struct mbuf_list *ml)
{
	void *buf;
	int avail, processed;

again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

	processed = 0;
	while (avail) {
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset, ml);
		buf += ring->itemsz;
		processed++;
		/* Hand descriptors back in batches to keep the ring moving. */
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	/*
	 * read_avail() stops at the end of the ring; if we wrapped,
	 * check for more descriptors at the start.
	 */
	if (ring->r_ptr == 0)
		goto again;
}
1431 
1432 void
1433 bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf, struct mbuf_list *ml)
1434 {
1435 	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
1436 	struct msgbuf_ioctl_resp_hdr *resp;
1437 	struct msgbuf_tx_status *tx;
1438 	struct msgbuf_rx_complete *rx;
1439 	struct msgbuf_rx_event *event;
1440 	struct msgbuf_common_hdr *msg;
1441 	struct msgbuf_flowring_create_resp *fcr;
1442 	struct msgbuf_flowring_delete_resp *fdr;
1443 	struct bwfm_pci_msgring *ring;
1444 	struct mbuf *m;
1445 	int flowid;
1446 
1447 	msg = (struct msgbuf_common_hdr *)buf;
1448 	switch (msg->msgtype)
1449 	{
1450 	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
1451 		fcr = (struct msgbuf_flowring_create_resp *)buf;
1452 		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
1453 		if (flowid < 2)
1454 			break;
1455 		flowid -= 2;
1456 		if (flowid >= sc->sc_max_flowrings)
1457 			break;
1458 		ring = &sc->sc_flowrings[flowid];
1459 		if (ring->status != RING_OPENING)
1460 			break;
1461 		if (fcr->compl_hdr.status) {
1462 			printf("%s: failed to open flowring %d\n",
1463 			    DEVNAME(sc), flowid);
1464 			ring->status = RING_CLOSED;
1465 			if (ring->m) {
1466 				m_freem(ring->m);
1467 				ring->m = NULL;
1468 			}
1469 			ifq_restart(&ifp->if_snd);
1470 			break;
1471 		}
1472 		ring->status = RING_OPEN;
1473 		if (ring->m != NULL) {
1474 			m = ring->m;
1475 			ring->m = NULL;
1476 			if (bwfm_pci_txdata(&sc->sc_sc, m))
1477 				m_freem(ring->m);
1478 		}
1479 		ifq_restart(&ifp->if_snd);
1480 		break;
1481 	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
1482 		fdr = (struct msgbuf_flowring_delete_resp *)buf;
1483 		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
1484 		if (flowid < 2)
1485 			break;
1486 		flowid -= 2;
1487 		if (flowid >= sc->sc_max_flowrings)
1488 			break;
1489 		ring = &sc->sc_flowrings[flowid];
1490 		if (ring->status != RING_CLOSING)
1491 			break;
1492 		if (fdr->compl_hdr.status) {
1493 			printf("%s: failed to delete flowring %d\n",
1494 			    DEVNAME(sc), flowid);
1495 			break;
1496 		}
1497 		bwfm_pci_dmamem_free(sc, ring->ring);
1498 		ring->status = RING_CLOSED;
1499 		break;
1500 	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
1501 		m = bwfm_pci_pktid_free(sc, &sc->sc_ioctl_pkts,
1502 		    letoh32(msg->request_id));
1503 		if (m == NULL)
1504 			break;
1505 		m_freem(m);
1506 		break;
1507 	case MSGBUF_TYPE_IOCTL_CMPLT:
1508 		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
1509 		bwfm_pci_msgbuf_rxioctl(sc, resp);
1510 		if_rxr_put(&sc->sc_ioctl_ring, 1);
1511 		bwfm_pci_fill_rx_rings(sc);
1512 		break;
1513 	case MSGBUF_TYPE_WL_EVENT:
1514 		event = (struct msgbuf_rx_event *)buf;
1515 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1516 		    letoh32(event->msg.request_id));
1517 		if (m == NULL)
1518 			break;
1519 		m_adj(m, sc->sc_rx_dataoffset);
1520 		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
1521 		bwfm_rx(&sc->sc_sc, m, ml);
1522 		if_rxr_put(&sc->sc_event_ring, 1);
1523 		bwfm_pci_fill_rx_rings(sc);
1524 		break;
1525 	case MSGBUF_TYPE_TX_STATUS:
1526 		tx = (struct msgbuf_tx_status *)buf;
1527 		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
1528 		    letoh32(tx->msg.request_id) - 1);
1529 		if (m == NULL)
1530 			break;
1531 		m_freem(m);
1532 		if (sc->sc_tx_pkts_full) {
1533 			sc->sc_tx_pkts_full = 0;
1534 			ifq_restart(&ifp->if_snd);
1535 		}
1536 		break;
1537 	case MSGBUF_TYPE_RX_CMPLT:
1538 		rx = (struct msgbuf_rx_complete *)buf;
1539 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1540 		    letoh32(rx->msg.request_id));
1541 		if (m == NULL)
1542 			break;
1543 		if (letoh16(rx->data_offset))
1544 			m_adj(m, letoh16(rx->data_offset));
1545 		else if (sc->sc_rx_dataoffset)
1546 			m_adj(m, sc->sc_rx_dataoffset);
1547 		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
1548 		bwfm_rx(&sc->sc_sc, m, ml);
1549 		if_rxr_put(&sc->sc_rxbuf_ring, 1);
1550 		bwfm_pci_fill_rx_rings(sc);
1551 		break;
1552 	default:
1553 		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
1554 		break;
1555 	}
1556 }
1557 
1558 /* Bus core helpers */
1559 void
1560 bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
1561 {
1562 	struct bwfm_softc *bwfm = (void *)sc;
1563 	struct bwfm_core *core;
1564 
1565 	core = bwfm_chip_get_core(bwfm, id);
1566 	if (core == NULL) {
1567 		printf("%s: could not find core to select", DEVNAME(sc));
1568 		return;
1569 	}
1570 
1571 	pci_conf_write(sc->sc_pc, sc->sc_tag,
1572 	    BWFM_PCI_BAR0_WINDOW, core->co_base);
1573 	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
1574 	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
1575 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1576 		    BWFM_PCI_BAR0_WINDOW, core->co_base);
1577 }
1578 
1579 uint32_t
1580 bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
1581 {
1582 	struct bwfm_pci_softc *sc = (void *)bwfm;
1583 	uint32_t page, offset;
1584 
1585 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1586 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1587 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1588 	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
1589 }
1590 
1591 void
1592 bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
1593 {
1594 	struct bwfm_pci_softc *sc = (void *)bwfm;
1595 	uint32_t page, offset;
1596 
1597 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1598 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1599 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1600 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
1601 }
1602 
/*
 * Buscore prepare hook; nothing needs to be done on PCI before reset.
 */
int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}
1608 
/*
 * Reset the chip via the chipcommon watchdog, with ASPM disabled for
 * the duration, then clear any stale interrupts.
 */
int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	/* Disable ASPM while the reset is in progress. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	/* Kick the watchdog and give the chip time to come back up. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	/* Restore the previous ASPM setting. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	/*
	 * On old PCIe core revisions, read back and rewrite a set of
	 * config registers through the indirect config window.
	 */
	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		uint16_t cfg_offset[] = {
		    BWFM_PCI_CFGREG_STATUS_CMD,
		    BWFM_PCI_CFGREG_PM_CSR,
		    BWFM_PCI_CFGREG_MSI_CAP,
		    BWFM_PCI_CFGREG_MSI_ADDR_L,
		    BWFM_PCI_CFGREG_MSI_ADDR_H,
		    BWFM_PCI_CFGREG_MSI_DATA,
		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
		    BWFM_PCI_CFGREG_RBAR_CTRL,
		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}

	/* Acknowledge any interrupt left pending from before the reset. */
	reg = bwfm_pci_intr_status(sc);
	if (reg != 0xffffffff)
		bwfm_pci_intr_ack(sc, reg);

	return 0;
}
1666 
/*
 * Start the newly loaded firmware by writing the reset vector to
 * offset 0 of TCM.
 */
void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}
1673 
/* Map ieee80211_classify() priorities (IP precedence) to TX fifos. */
static int bwfm_pci_prio2fifo[8] = {
	0, /* best effort */
	1, /* IPTOS_PREC_IMMEDIATE */
	1, /* IPTOS_PREC_PRIORITY */
	0, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};
1684 
/*
 * Find the open flowring matching an outgoing packet's TX fifo (and,
 * in hostap mode, its destination address).  Returns the flowring
 * index, or -1 if no matching ring exists yet.
 */
int
bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
#ifndef IEEE80211_STA_ONLY
	uint8_t *da = mtod(m, uint8_t *);
#endif
	int flowid, prio, fifo;
	int i, found;

	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		/* Hash on the last byte of the destination address. */
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		/*
		 * NOTE(review): callers treat negative values as "no ring";
		 * a positive ENOBUFS would be misread as a valid flowid.
		 * Unreachable with the supported opmodes above — verify.
		 */
		return ENOBUFS;
	}

	/* Linear probe from the hashed starting slot. */
	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (ic->ic_opmode == IEEE80211_M_STA &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo) {
			found = 1;
			break;
		}
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo &&
		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
			found = 1;
			break;
		}
#endif
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	if (found)
		return flowid;

	return -1;
}
1741 
/*
 * Start creating a flowring for a packet that has no matching ring
 * yet.  A free slot is claimed (status RING_OPENING) and the actual
 * firmware request is issued asynchronously from the command task;
 * the mbuf travels along and is transmitted once the ring opens.
 */
void
bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create cmd;
#ifndef IEEE80211_STA_ONLY
	uint8_t *da = mtod(m, uint8_t *);
#endif
	struct bwfm_pci_msgring *ring;
	int flowid, prio, fifo;
	int i, found;

	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		/* Hash on the last byte of the destination address. */
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return;
	}

	/* Linear probe for a closed slot, starting at the hash. */
	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[flowid];
		if (ring->status == RING_CLOSED) {
			ring->status = RING_OPENING;
			found = 1;
			break;
		}
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	/*
	 * We cannot recover from that so far.  Only a stop/init
	 * cycle can revive this if it ever happens at all.
	 */
	if (!found) {
		printf("%s: no flowring available\n", DEVNAME(sc));
		return;
	}

	cmd.m = m;
	cmd.prio = prio;
	cmd.flowid = flowid;
	bwfm_do_async(&sc->sc_sc, bwfm_pci_flowring_create_cb, &cmd, sizeof(cmd));
}
1800 
/*
 * Async continuation of bwfm_pci_flowring_create(): allocate the
 * ring's DMA memory and send the flowring create request to the
 * firmware.  The mbuf is parked on the ring and sent once the
 * create completion arrives.
 */
void
bwfm_pci_flowring_create_cb(struct bwfm_softc *bwfm, void *arg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
#ifndef IEEE80211_STA_ONLY
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
#endif
	struct bwfm_cmd_flowring_create *cmd = arg;
	struct msgbuf_tx_flowring_create_req *req;
	struct bwfm_pci_msgring *ring;
	uint8_t *da, *sa;
	int s;

	/* The packet starts with an Ethernet header: dst, then src. */
	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;

	ring = &sc->sc_flowrings[cmd->flowid];
	if (ring->status != RING_OPENING) {
		printf("%s: flowring not opening\n", DEVNAME(sc));
		return;
	}

	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		return;
	}

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		splx(s);
		return;
	}

	ring->status = RING_OPENING;
	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
	ring->m = cmd->m;
	memcpy(ring->mac, da, ETHER_ADDR_LEN);
#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
#endif

	/* Firmware flowring ids start at 2; 0/1 are the control rings. */
	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->tid = bwfm_pci_prio2fifo[cmd->prio];
	req->flow_ring_id = letoh16(cmd->flowid + 2);
	memcpy(req->da, da, ETHER_ADDR_LEN);
	memcpy(req->sa, sa, ETHER_ADDR_LEN);
	req->flow_ring_addr.high_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	req->flow_ring_addr.low_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	req->max_items = letoh16(512);
	req->len_item = letoh16(48);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);
}
1862 
/*
 * Ask the firmware to delete an open flowring.  The ring moves to
 * RING_CLOSING; its resources are freed when the delete completion
 * arrives in bwfm_pci_msg_rx().
 */
void
bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
{
	struct msgbuf_tx_flowring_delete_req *req;
	struct bwfm_pci_msgring *ring;
	int s;

	ring = &sc->sc_flowrings[flowid];
	if (ring->status != RING_OPEN) {
		printf("%s: flowring not open\n", DEVNAME(sc));
		return;
	}

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		splx(s);
		return;
	}

	ring->status = RING_CLOSING;

	/* Firmware flowring ids start at 2; 0/1 are the control rings. */
	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->flow_ring_id = letoh16(flowid + 2);
	req->reason = 0;

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);
}
1895 
/*
 * Bus-level stop hook: tear down every open flowring.
 */
void
bwfm_pci_stop(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	int i;

	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[i];
		if (ring->status == RING_OPEN)
			bwfm_pci_flowring_delete(sc, i);
	}
}
1909 
/*
 * Bus-level TX admission check.  Returns ENOBUFS while any flowring
 * is still opening, or when no TX packet IDs are left (in which case
 * sc_tx_pkts_full is set so TX status completions restart the queue).
 */
int
bwfm_pci_txcheck(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	int i;

	/* If we are transitioning, we cannot send. */
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[i];
		if (ring->status == RING_OPENING)
			return ENOBUFS;
	}

	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
		sc->sc_tx_pkts_full = 1;
		return ENOBUFS;
	}

	return 0;
}
1931 
/*
 * Queue an mbuf on the flowring matching its priority/destination.
 * If no such ring exists yet, its creation is kicked off and the
 * mbuf is handed over to be sent once the ring opens.  Returns 0 on
 * success or when the packet was parked on a new ring, ENOBUFS/EFBIG
 * on resource shortage.
 */
int
bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf *m)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	struct msgbuf_tx_msghdr *tx;
	uint32_t pktid;
	paddr_t paddr;
	int flowid, ret;

	flowid = bwfm_pci_flowring_lookup(sc, m);
	if (flowid < 0) {
		/*
		 * We cannot send the packet right now as there is
		 * no flowring yet.  The flowring will be created
		 * asynchronously.  While the ring is transitioning
		 * the TX check will tell the upper layers that we
		 * cannot send packets right now.  When the flowring
		 * is created the queue will be restarted and this
		 * mbuf will be transmitted.
		 */
		bwfm_pci_flowring_create(sc, m);
		return 0;
	}

	ring = &sc->sc_flowrings[flowid];
	if (ring->status == RING_OPENING ||
	    ring->status == RING_CLOSING) {
		printf("%s: tried to use a flow that was "
		    "transitioning in status %d\n",
		    DEVNAME(sc), ring->status);
		return ENOBUFS;
	}

	tx = bwfm_pci_ring_write_reserve(sc, ring);
	if (tx == NULL)
		return ENOBUFS;

	/* The Ethernet header travels in the descriptor, not the buffer. */
	memset(tx, 0, sizeof(*tx));
	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
	tx->msg.ifidx = 0;
	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
	tx->flags |= ieee80211_classify(&sc->sc_sc.sc_ic, m) <<
	    BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
	tx->seg_cnt = 1;
	memcpy(tx->txhdr, mtod(m, char *), ETHER_HDR_LEN);

	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, m, &pktid, &paddr);
	if (ret) {
		if (ret == ENOBUFS) {
			printf("%s: no pktid available for TX\n",
			    DEVNAME(sc));
			sc->sc_tx_pkts_full = 1;
		}
		bwfm_pci_ring_write_cancel(sc, ring, 1);
		return ret;
	}
	/* Point the device past the header copied into the descriptor. */
	paddr += ETHER_HDR_LEN;

	/* Request id 0 is avoided by storing pktid + 1. */
	tx->msg.request_id = htole32(pktid + 1);
	tx->data_len = htole16(m->m_len - ETHER_HDR_LEN);
	tx->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
	tx->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, ring);
	return 0;
}
1999 
2000 #ifdef BWFM_DEBUG
2001 void
2002 bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
2003 {
2004 	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
2005 	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);
2006 
2007 	if (newidx != sc->sc_console_readidx)
2008 		DPRINTFN(3, ("BWFM CONSOLE: "));
2009 	while (newidx != sc->sc_console_readidx) {
2010 		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
2011 		    sc->sc_console_buf_addr + sc->sc_console_readidx);
2012 		sc->sc_console_readidx++;
2013 		if (sc->sc_console_readidx == sc->sc_console_buf_size)
2014 			sc->sc_console_readidx = 0;
2015 		if (ch == '\r')
2016 			continue;
2017 		DPRINTFN(3, ("%c", ch));
2018 	}
2019 }
2020 #endif
2021 
/*
 * PCI interrupt handler.  Reads and acknowledges the mailbox interrupt
 * status, then services the three device-to-host completion rings.
 * Returns 1 when the interrupt was ours, 0 otherwise.
 */
int
bwfm_pci_intr(void *v)
{
	struct bwfm_pci_softc *sc = (void *)v;
	struct bwfm_softc *bwfm = (void *)sc;
	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint32_t status, mask;

	/* Spurious interrupt before attach finished setting up rings. */
	if (!sc->sc_initialized)
		return 0;

	status = bwfm_pci_intr_status(sc);
	/* FIXME: interrupt status seems to be zero? */
	if (status == 0 && bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
		status |= BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB;
	if (status == 0)
		return 0;

	/* Mask further interrupts while servicing, then clear this one. */
	bwfm_pci_intr_disable(sc);
	bwfm_pci_intr_ack(sc, status);

	/* Host mailbox data events are not handled yet; just report them. */
	if (bwfm->sc_chip.ch_chip != BRCM_CC_4378_CHIP_ID &&
	    (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1)))
		printf("%s: handle MB data\n", __func__);

	/* The 4378 uses the 64-bit register layout's doorbell bit. */
	mask = BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB;
	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
		mask = BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB;

	if (status & mask) {
		/* Service all device-to-host completion rings. */
		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete, &ml);
		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete, &ml);
		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete, &ml);

		/* Pass received packets up; throttle RX refill on livelock. */
		if (ifiq_input(&ifp->if_rcv, &ml))
			if_rxr_livelocked(&sc->sc_rxbuf_ring);
	}

#ifdef BWFM_DEBUG
	bwfm_pci_debug_console(sc);
#endif

	/* Re-enable interrupts now that the rings are drained. */
	bwfm_pci_intr_enable(sc);
	return 1;
}
2069 
2070 void
2071 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
2072 {
2073 	struct bwfm_softc *bwfm = (void *)sc;
2074 
2075 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2076 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2077 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK,
2078 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2079 	else
2080 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2081 		    BWFM_PCI_PCIE2REG_MAILBOXMASK,
2082 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2083 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
2084 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2085 }
2086 
2087 void
2088 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
2089 {
2090 	struct bwfm_softc *bwfm = (void *)sc;
2091 
2092 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2093 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2094 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK, 0);
2095 	else
2096 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2097 		    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
2098 }
2099 
2100 uint32_t
2101 bwfm_pci_intr_status(struct bwfm_pci_softc *sc)
2102 {
2103 	struct bwfm_softc *bwfm = (void *)sc;
2104 
2105 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2106 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2107 		    BWFM_PCI_64_PCIE2REG_MAILBOXINT);
2108 	else
2109 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2110 		    BWFM_PCI_PCIE2REG_MAILBOXINT);
2111 }
2112 
2113 void
2114 bwfm_pci_intr_ack(struct bwfm_pci_softc *sc, uint32_t status)
2115 {
2116 	struct bwfm_softc *bwfm = (void *)sc;
2117 
2118 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2119 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2120 		    BWFM_PCI_64_PCIE2REG_MAILBOXINT, status);
2121 	else
2122 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2123 		    BWFM_PCI_PCIE2REG_MAILBOXINT, status);
2124 }
2125 
2126 void
2127 bwfm_pci_hostready(struct bwfm_pci_softc *sc)
2128 {
2129 	struct bwfm_softc *bwfm = (void *)sc;
2130 
2131 	if ((sc->sc_shared_flags & BWFM_SHARED_INFO_HOSTRDY_DB1) == 0)
2132 		return;
2133 
2134 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2135 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2136 		    BWFM_PCI_64_PCIE2REG_H2D_MAILBOX_1, 1);
2137 	else
2138 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2139 		    BWFM_PCI_PCIE2REG_H2D_MAILBOX_1, 1);
2140 }
2141 
2142 /* Msgbuf protocol implementation */
2143 int
2144 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
2145     int cmd, char *buf, size_t *len)
2146 {
2147 	struct bwfm_pci_softc *sc = (void *)bwfm;
2148 	struct msgbuf_ioctl_req_hdr *req;
2149 	struct bwfm_pci_ioctl *ctl;
2150 	struct mbuf *m;
2151 	uint32_t pktid;
2152 	paddr_t paddr;
2153 	size_t buflen;
2154 	int s;
2155 
2156 	buflen = min(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
2157 	m = MCLGETL(NULL, M_DONTWAIT, buflen);
2158 	if (m == NULL)
2159 		return 1;
2160 	m->m_len = m->m_pkthdr.len = buflen;
2161 
2162 	if (buf)
2163 		memcpy(mtod(m, char *), buf, buflen);
2164 	else
2165 		memset(mtod(m, char *), 0, buflen);
2166 
2167 	s = splnet();
2168 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
2169 	if (req == NULL) {
2170 		splx(s);
2171 		m_freem(m);
2172 		return 1;
2173 	}
2174 
2175 	if (bwfm_pci_pktid_new(sc, &sc->sc_ioctl_pkts, m, &pktid, &paddr)) {
2176 		bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
2177 		splx(s);
2178 		m_freem(m);
2179 		return 1;
2180 	}
2181 
2182 	ctl = malloc(sizeof(*ctl), M_TEMP, M_WAITOK|M_ZERO);
2183 	ctl->transid = sc->sc_ioctl_transid++;
2184 	TAILQ_INSERT_TAIL(&sc->sc_ioctlq, ctl, next);
2185 
2186 	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
2187 	req->msg.ifidx = 0;
2188 	req->msg.flags = 0;
2189 	req->msg.request_id = htole32(pktid);
2190 	req->cmd = htole32(cmd);
2191 	req->output_buf_len = htole16(*len);
2192 	req->trans_id = htole16(ctl->transid);
2193 
2194 	req->input_buf_len = htole16(m->m_len);
2195 	req->req_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
2196 	req->req_buf_addr.low_addr = htole32(paddr & 0xffffffff);
2197 
2198 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
2199 	splx(s);
2200 
2201 	tsleep_nsec(ctl, PWAIT, "bwfm", SEC_TO_NSEC(1));
2202 	TAILQ_REMOVE(&sc->sc_ioctlq, ctl, next);
2203 
2204 	if (ctl->m == NULL) {
2205 		free(ctl, M_TEMP, sizeof(*ctl));
2206 		return 1;
2207 	}
2208 
2209 	*len = min(ctl->retlen, m->m_len);
2210 	*len = min(*len, buflen);
2211 	if (buf)
2212 		m_copydata(ctl->m, 0, *len, buf);
2213 	m_freem(ctl->m);
2214 
2215 	if (ctl->status < 0) {
2216 		free(ctl, M_TEMP, sizeof(*ctl));
2217 		return 1;
2218 	}
2219 
2220 	free(ctl, M_TEMP, sizeof(*ctl));
2221 	return 0;
2222 }
2223 
2224 int
2225 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
2226     int cmd, char *buf, size_t len)
2227 {
2228 	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
2229 }
2230 
2231 void
2232 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *sc,
2233     struct msgbuf_ioctl_resp_hdr *resp)
2234 {
2235 	struct bwfm_pci_ioctl *ctl, *tmp;
2236 	struct mbuf *m;
2237 
2238 	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
2239 	    letoh32(resp->msg.request_id));
2240 
2241 	TAILQ_FOREACH_SAFE(ctl, &sc->sc_ioctlq, next, tmp) {
2242 		if (ctl->transid != letoh16(resp->trans_id))
2243 			continue;
2244 		ctl->m = m;
2245 		ctl->retlen = letoh16(resp->resp_len);
2246 		ctl->status = letoh16(resp->compl_hdr.status);
2247 		wakeup(ctl);
2248 		return;
2249 	}
2250 
2251 	m_freem(m);
2252 }
2253