xref: /openbsd-src/sys/dev/pci/if_bwfm_pci.c (revision ffcef06798eb7b98532e76a80212f0772bebc4f6)
1 /*	$OpenBSD: if_bwfm_pci.c,v 1.56 2021/08/31 23:05:11 patrick Exp $	*/
2 /*
3  * Copyright (c) 2010-2016 Broadcom Corporation
4  * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and/or distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bpfilter.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/buf.h>
24 #include <sys/kernel.h>
25 #include <sys/malloc.h>
26 #include <sys/device.h>
27 #include <sys/queue.h>
28 #include <sys/socket.h>
29 
30 #if defined(__HAVE_FDT)
31 #include <machine/fdt.h>
32 #include <dev/ofw/openfirm.h>
33 #endif
34 
35 #if NBPFILTER > 0
36 #include <net/bpf.h>
37 #endif
38 #include <net/if.h>
39 #include <net/if_dl.h>
40 #include <net/if_media.h>
41 
42 #include <netinet/in.h>
43 #include <netinet/if_ether.h>
44 
45 #include <net80211/ieee80211_var.h>
46 
47 #include <machine/bus.h>
48 
49 #include <dev/pci/pcireg.h>
50 #include <dev/pci/pcivar.h>
51 #include <dev/pci/pcidevs.h>
52 
53 #include <dev/ic/bwfmvar.h>
54 #include <dev/ic/bwfmreg.h>
55 #include <dev/pci/if_bwfm_pci.h>
56 
57 #define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
58 #define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
59 #define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN
60 
61 #define BWFM_NUM_TX_MSGRINGS			2
62 #define BWFM_NUM_RX_MSGRINGS			3
63 
64 #define BWFM_NUM_IOCTL_PKTIDS			8
65 #define BWFM_NUM_TX_PKTIDS			2048
66 #define BWFM_NUM_RX_PKTIDS			1024
67 
68 #define BWFM_NUM_IOCTL_DESCS			1
69 #define BWFM_NUM_TX_DESCS			1
70 #define BWFM_NUM_RX_DESCS			1
71 
72 #ifdef BWFM_DEBUG
73 #define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
74 #define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
75 static int bwfm_debug = 2;
76 #else
77 #define DPRINTF(x)	do { ; } while (0)
78 #define DPRINTFN(n, x)	do { ; } while (0)
79 #endif
80 
81 #define DEVNAME(sc)	((sc)->sc_sc.sc_dev.dv_xname)
82 
/*
 * Host-side lifecycle state of a message ring.  Only flowrings change
 * state at runtime; the fixed control/completion rings stay open once
 * set up in bwfm_pci_preinit().
 */
enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};
89 
/*
 * Host view of one message ring shared with the dongle.  The ring
 * buffer itself lives in DMA-able memory ("ring"); the read and write
 * indices live at r_idx_addr/w_idx_addr, which are offsets into either
 * TCM or the DMA index buffer depending on sc_dma_idx_sz.
 */
struct bwfm_pci_msgring {
	uint32_t		 w_idx_addr;	/* offset of the write index */
	uint32_t		 r_idx_addr;	/* offset of the read index */
	uint32_t		 w_ptr;		/* host copy of write index */
	uint32_t		 r_ptr;		/* host copy of read index */
	int			 nitem;		/* number of slots */
	int			 itemsz;	/* slot size in bytes */
	enum ring_status	 status;	/* flowring lifecycle state */
	struct bwfm_pci_dmamem	*ring;		/* ring buffer memory */
	struct mbuf		*m;

	/* Flowring (per-flow TX ring) parameters only. */
	int			 fifo;
	uint8_t			 mac[ETHER_ADDR_LEN];
};
104 
/*
 * One control (ioctl) transaction; completed responses are queued on
 * sc_ioctlq until matched against a waiting request by transaction id.
 */
struct bwfm_pci_ioctl {
	uint16_t		 transid;	/* matches request to response */
	uint16_t		 retlen;	/* returned payload length */
	int16_t			 status;	/* firmware status code */
	struct mbuf		*m;		/* response payload */
	TAILQ_ENTRY(bwfm_pci_ioctl) next;
};
112 
/* One DMA-mapped packet buffer: the map plus the mbuf loaded into it. */
struct bwfm_pci_buf {
	bus_dmamap_t	 bb_map;
	struct mbuf	*bb_m;
};
117 
/*
 * Fixed-size table translating packet ids exchanged with the firmware
 * to/from DMA-mapped buffers.
 */
struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;	/* npkt preallocated entries */
	uint32_t		 npkt;	/* table size */
	int			 last;	/* NOTE(review): appears to be a
					 * search hint for the next free id;
					 * confirm in bwfm_pci_pktid_new() */
};
123 
/*
 * Per-device softc.  sc_sc (the generic bwfm state) is the first
 * member so the structure can be cast between struct bwfm_softc and
 * struct bwfm_pci_softc, as done throughout this file.
 */
struct bwfm_pci_softc {
	struct bwfm_softc	 sc_sc;		/* generic bwfm state; first */
	pci_chipset_tag_t	 sc_pc;
	pcitag_t		 sc_tag;
	pcireg_t		 sc_id;
	void 			*sc_ih;		/* interrupt handle */

	int			 sc_initialized;	/* preinit completed */

	/* BAR0: chip/PCIe register window. */
	bus_space_tag_t		 sc_reg_iot;
	bus_space_handle_t	 sc_reg_ioh;
	bus_size_t		 sc_reg_ios;

	/* BAR1: tightly coupled memory (TCM) window. */
	bus_space_tag_t		 sc_tcm_iot;
	bus_space_handle_t	 sc_tcm_ioh;
	bus_size_t		 sc_tcm_ios;

	bus_dma_tag_t		 sc_dmat;

	/* Firmware shared info area (address in TCM, flags, version). */
	uint32_t		 sc_shared_address;
	uint32_t		 sc_shared_flags;
	uint8_t			 sc_shared_version;

	/* Optional host DMA buffer holding the ring read/write indices. */
	uint8_t			 sc_dma_idx_sz;		/* 0 = indices in TCM */
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
	size_t			 sc_dma_idx_bufsz;

	uint16_t		 sc_max_rxbufpost;	/* RX buffers to post */
	uint32_t		 sc_rx_dataoffset;	/* data offset in RX pkts */
	uint32_t		 sc_htod_mb_data_addr;	/* host->dongle mailbox */
	uint32_t		 sc_dtoh_mb_data_addr;	/* dongle->host mailbox */
	uint32_t		 sc_ring_info_addr;	/* ring info in TCM */

	/* Firmware console ("dmesg") location in TCM. */
	uint32_t		 sc_console_base_addr;
	uint32_t		 sc_console_buf_addr;
	uint32_t		 sc_console_buf_size;
	uint32_t		 sc_console_readidx;

	/* Ring limits reported by the firmware. */
	uint16_t		 sc_max_flowrings;
	uint16_t		 sc_max_submissionrings;
	uint16_t		 sc_max_completionrings;

	/* Fixed rings plus dynamically created per-flow TX rings. */
	struct bwfm_pci_msgring	 sc_ctrl_submit;
	struct bwfm_pci_msgring	 sc_rxpost_submit;
	struct bwfm_pci_msgring	 sc_ctrl_complete;
	struct bwfm_pci_msgring	 sc_tx_complete;
	struct bwfm_pci_msgring	 sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;

	/* Scratch and ring update buffers handed to the firmware. */
	struct bwfm_pci_dmamem	*sc_scratch_buf;
	struct bwfm_pci_dmamem	*sc_ringupd_buf;

	/* Outstanding ioctl transactions. */
	TAILQ_HEAD(, bwfm_pci_ioctl) sc_ioctlq;
	uint16_t		 sc_ioctl_transid;

	struct if_rxring	 sc_ioctl_ring;
	struct if_rxring	 sc_event_ring;
	struct if_rxring	 sc_rxbuf_ring;

	/* Packet id <-> buffer translation tables. */
	struct bwfm_pci_pkts	 sc_ioctl_pkts;
	struct bwfm_pci_pkts	 sc_rx_pkts;
	struct bwfm_pci_pkts	 sc_tx_pkts;
	int			 sc_tx_pkts_full;	/* out of TX pkt ids */

	uint8_t			 sc_mbdata_done;	/* mailbox ack seen */
};
190 
/* A single DMA-safe allocation; accessed via the BWFM_PCI_DMA_* macros. */
struct bwfm_pci_dmamem {
	bus_dmamap_t		bdm_map;	/* DMA map */
	bus_dma_segment_t	bdm_seg;	/* backing segment */
	size_t			bdm_size;	/* size in bytes */
	caddr_t			bdm_kva;	/* kernel virtual address */
};
197 
198 #define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
199 #define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
200 #define BWFM_PCI_DMA_DVA(_bdm)	((uint64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
201 #define BWFM_PCI_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)
202 
203 int		 bwfm_pci_match(struct device *, void *, void *);
204 void		 bwfm_pci_attach(struct device *, struct device *, void *);
205 int		 bwfm_pci_detach(struct device *, int);
206 int		 bwfm_pci_activate(struct device *, int);
207 void		 bwfm_pci_cleanup(struct bwfm_pci_softc *);
208 
209 #if defined(__HAVE_FDT)
210 int		 bwfm_pci_read_otp(struct bwfm_pci_softc *);
211 void		 bwfm_pci_process_otp_tuple(struct bwfm_pci_softc *, uint8_t,
212 		    uint8_t, uint8_t *);
213 #endif
214 
215 int		 bwfm_pci_intr(void *);
216 void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
217 void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
218 uint32_t	 bwfm_pci_intr_status(struct bwfm_pci_softc *);
219 void		 bwfm_pci_intr_ack(struct bwfm_pci_softc *, uint32_t);
220 uint32_t	 bwfm_pci_intmask(struct bwfm_pci_softc *);
221 void		 bwfm_pci_hostready(struct bwfm_pci_softc *);
222 int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
223 		    size_t, const u_char *, size_t);
224 void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int );
225 
226 struct bwfm_pci_dmamem *
227 		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
228 		    bus_size_t);
229 void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
230 int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
231 		    struct bwfm_pci_pkts *);
232 int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
233 		    struct bwfm_pci_pkts *, struct mbuf *,
234 		    uint32_t *, paddr_t *);
235 struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
236 		    struct bwfm_pci_pkts *, uint32_t);
237 void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
238 		    struct if_rxring *, uint32_t);
239 void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
240 void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
241 int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
242 		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
243 int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
244 		    int, size_t);
245 
246 void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
247 		    struct bwfm_pci_msgring *);
248 void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
249 		    struct bwfm_pci_msgring *);
250 void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
251 		    struct bwfm_pci_msgring *);
252 void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
253 		    struct bwfm_pci_msgring *);
254 void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
255 		    struct bwfm_pci_msgring *);
256 void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
257 		    struct bwfm_pci_msgring *);
258 void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
259 		    struct bwfm_pci_msgring *, int, int *);
260 void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
261 		    struct bwfm_pci_msgring *, int *);
262 void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
263 		    struct bwfm_pci_msgring *, int);
264 void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
265 		    struct bwfm_pci_msgring *);
266 void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
267 		    struct bwfm_pci_msgring *, int);
268 
269 void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
270 		    struct bwfm_pci_msgring *, struct mbuf_list *);
271 void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *,
272 		    struct mbuf_list *);
273 
274 uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
275 void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
276 		    uint32_t);
277 int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
278 int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
279 void		 bwfm_pci_buscore_activate(struct bwfm_softc *, uint32_t);
280 
281 int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
282 		     struct mbuf *);
283 void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
284 		     struct mbuf *);
285 void		 bwfm_pci_flowring_create_cb(struct bwfm_softc *, void *);
286 void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);
287 
288 int		 bwfm_pci_preinit(struct bwfm_softc *);
289 void		 bwfm_pci_stop(struct bwfm_softc *);
290 int		 bwfm_pci_txcheck(struct bwfm_softc *);
291 int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf *);
292 
293 int		 bwfm_pci_send_mb_data(struct bwfm_pci_softc *, uint32_t);
294 void		 bwfm_pci_handle_mb_data(struct bwfm_pci_softc *);
295 
296 #ifdef BWFM_DEBUG
297 void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
298 #endif
299 
300 int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
301 		    int, char *, size_t *);
302 int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
303 		    int, char *, size_t);
304 void		 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *,
305 		    struct msgbuf_ioctl_resp_hdr *);
306 
/* Buscore accessors used by the generic chip code (bwfm_chip_attach). */
struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};
315 
/* Bus operations exposed to the generic bwfm layer. */
struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_preinit = bwfm_pci_preinit,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,	/* control goes via the msgbuf proto ops */
};
323 
/* Msgbuf protocol operations; RX arrives via the completion rings. */
struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
	.proto_rx = NULL,
	.proto_rxctl = NULL,
};
330 
/* Autoconf attachment glue. */
struct cfattach bwfm_pci_ca = {
	sizeof(struct bwfm_pci_softc),
	bwfm_pci_match,
	bwfm_pci_attach,
	bwfm_pci_detach,
	bwfm_pci_activate,
};
338 
/* PCI devices supported by this driver. */
static const struct pci_matchid bwfm_pci_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4356 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4371 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4378 },
};
346 
347 int
348 bwfm_pci_match(struct device *parent, void *match, void *aux)
349 {
350 	return (pci_matchbyid(aux, bwfm_pci_devices,
351 	    nitems(bwfm_pci_devices)));
352 }
353 
354 void
355 bwfm_pci_attach(struct device *parent, struct device *self, void *aux)
356 {
357 	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
358 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
359 	const char *intrstr;
360 	pci_intr_handle_t ih;
361 
362 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
363 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
364 	    NULL, &sc->sc_tcm_ios, 0)) {
365 		printf(": can't map bar1\n");
366 		return;
367 	}
368 
369 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
370 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
371 	    NULL, &sc->sc_reg_ios, 0)) {
372 		printf(": can't map bar0\n");
373 		goto bar1;
374 	}
375 
376 	sc->sc_pc = pa->pa_pc;
377 	sc->sc_tag = pa->pa_tag;
378 	sc->sc_id = pa->pa_id;
379 	sc->sc_dmat = pa->pa_dmat;
380 
381 	/* Map and establish the interrupt. */
382 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
383 		printf(": couldn't map interrupt\n");
384 		goto bar0;
385 	}
386 	intrstr = pci_intr_string(pa->pa_pc, ih);
387 
388 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
389 	    bwfm_pci_intr, sc, DEVNAME(sc));
390 	if (sc->sc_ih == NULL) {
391 		printf(": couldn't establish interrupt");
392 		if (intrstr != NULL)
393 			printf(" at %s", intrstr);
394 		printf("\n");
395 		goto bar1;
396 	}
397 	printf(": %s\n", intrstr);
398 
399 	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
400 	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
401 	bwfm_attach(&sc->sc_sc);
402 	config_mountroot(self, bwfm_attachhook);
403 	return;
404 
405 bar0:
406 	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
407 bar1:
408 	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
409 }
410 
411 int
412 bwfm_pci_preinit(struct bwfm_softc *bwfm)
413 {
414 	struct bwfm_pci_softc *sc = (void *)bwfm;
415 	struct bwfm_pci_ringinfo ringinfo;
416 	const char *chip = NULL;
417 	u_char *ucode, *nvram;
418 	size_t size, nvsize, nvlen;
419 	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
420 	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
421 	uint32_t idx_offset, reg;
422 	int i;
423 
424 	if (sc->sc_initialized)
425 		return 0;
426 
427 	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
428 	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
429 		printf("%s: cannot attach chip\n", DEVNAME(sc));
430 		return 1;
431 	}
432 
433 #if defined(__HAVE_FDT)
434 	if (bwfm_pci_read_otp(sc)) {
435 		printf("%s: cannot read OTP\n", DEVNAME(sc));
436 		return 1;
437 	}
438 #endif
439 
440 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
441 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
442 	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
443 	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
444 	    BWFM_PCI_PCIE2REG_CONFIGDATA);
445 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
446 	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
447 
448 	switch (bwfm->sc_chip.ch_chip)
449 	{
450 	case BRCM_CC_4350_CHIP_ID:
451 		if (bwfm->sc_chip.ch_chiprev > 7)
452 			chip = "4350";
453 		else
454 			chip = "4350c2";
455 		break;
456 	case BRCM_CC_4356_CHIP_ID:
457 		chip = "4356";
458 		break;
459 	case BRCM_CC_43602_CHIP_ID:
460 		chip = "43602";
461 		break;
462 	case BRCM_CC_4371_CHIP_ID:
463 		chip = "4371";
464 		break;
465 	case BRCM_CC_4378_CHIP_ID:
466 		chip = "4378";
467 		break;
468 	default:
469 		printf("%s: unknown firmware for chip %s\n",
470 		    DEVNAME(sc), bwfm->sc_chip.ch_name);
471 		return 1;
472 	}
473 
474 	if (bwfm_loadfirmware(bwfm, chip, "-pcie", &ucode, &size,
475 	    &nvram, &nvsize, &nvlen) != 0)
476 		return 1;
477 
478 	/* Retrieve RAM size from firmware. */
479 	if (size >= BWFM_RAMSIZE + 8) {
480 		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
481 		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
482 			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
483 	}
484 
485 	if (bwfm_pci_load_microcode(sc, ucode, size, nvram, nvlen) != 0) {
486 		printf("%s: could not load microcode\n",
487 		    DEVNAME(sc));
488 		free(ucode, M_DEVBUF, size);
489 		free(nvram, M_DEVBUF, nvsize);
490 		return 1;
491 	}
492 	free(ucode, M_DEVBUF, size);
493 	free(nvram, M_DEVBUF, nvsize);
494 
495 	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
496 	    sc->sc_shared_address + BWFM_SHARED_INFO);
497 	sc->sc_shared_version = sc->sc_shared_flags;
498 	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
499 	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
500 		printf("%s: PCIe version %d unsupported\n",
501 		    DEVNAME(sc), sc->sc_shared_version);
502 		return 1;
503 	}
504 
505 	sc->sc_dma_idx_sz = 0;
506 	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
507 		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
508 			sc->sc_dma_idx_sz = sizeof(uint16_t);
509 		else
510 			sc->sc_dma_idx_sz = sizeof(uint32_t);
511 	}
512 
513 	/* Maximum RX data buffers in the ring. */
514 	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
515 	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
516 	if (sc->sc_max_rxbufpost == 0)
517 		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;
518 
519 	/* Alternative offset of data in a packet */
520 	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
521 	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);
522 
523 	/* For Power Management */
524 	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
525 	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
526 	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
527 	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);
528 
529 	/* Ring information */
530 	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
531 	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);
532 
533 	/* Firmware's "dmesg" */
534 	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
535 	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
536 	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
537 	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
538 	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
539 	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);
540 
541 	/* Read ring information. */
542 	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
543 	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
544 
545 	if (sc->sc_shared_version >= 6) {
546 		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
547 		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
548 		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
549 	} else {
550 		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
551 		sc->sc_max_flowrings = sc->sc_max_submissionrings -
552 		    BWFM_NUM_TX_MSGRINGS;
553 		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
554 	}
555 
556 	if (sc->sc_dma_idx_sz == 0) {
557 		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
558 		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
559 		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
560 		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
561 		idx_offset = sizeof(uint32_t);
562 	} else {
563 		uint64_t address;
564 
565 		/* Each TX/RX Ring has a Read and Write Ptr */
566 		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
567 		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
568 		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
569 		    sc->sc_dma_idx_bufsz, 8);
570 		if (sc->sc_dma_idx_buf == NULL) {
571 			/* XXX: Fallback to TCM? */
572 			printf("%s: cannot allocate idx buf\n",
573 			    DEVNAME(sc));
574 			return 1;
575 		}
576 
577 		idx_offset = sc->sc_dma_idx_sz;
578 		h2d_w_idx_ptr = 0;
579 		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
580 		ringinfo.h2d_w_idx_hostaddr_low =
581 		    htole32(address & 0xffffffff);
582 		ringinfo.h2d_w_idx_hostaddr_high =
583 		    htole32(address >> 32);
584 
585 		h2d_r_idx_ptr = h2d_w_idx_ptr +
586 		    sc->sc_max_submissionrings * idx_offset;
587 		address += sc->sc_max_submissionrings * idx_offset;
588 		ringinfo.h2d_r_idx_hostaddr_low =
589 		    htole32(address & 0xffffffff);
590 		ringinfo.h2d_r_idx_hostaddr_high =
591 		    htole32(address >> 32);
592 
593 		d2h_w_idx_ptr = h2d_r_idx_ptr +
594 		    sc->sc_max_submissionrings * idx_offset;
595 		address += sc->sc_max_submissionrings * idx_offset;
596 		ringinfo.d2h_w_idx_hostaddr_low =
597 		    htole32(address & 0xffffffff);
598 		ringinfo.d2h_w_idx_hostaddr_high =
599 		    htole32(address >> 32);
600 
601 		d2h_r_idx_ptr = d2h_w_idx_ptr +
602 		    sc->sc_max_completionrings * idx_offset;
603 		address += sc->sc_max_completionrings * idx_offset;
604 		ringinfo.d2h_r_idx_hostaddr_low =
605 		    htole32(address & 0xffffffff);
606 		ringinfo.d2h_r_idx_hostaddr_high =
607 		    htole32(address >> 32);
608 
609 		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
610 		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
611 	}
612 
613 	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
614 	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
615 	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
616 	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
617 	    &ring_mem_ptr))
618 		goto cleanup;
619 	/* TX rxpost ring: Send clean data mbufs for RX */
620 	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
621 	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
622 	    &ring_mem_ptr))
623 		goto cleanup;
624 	/* RX completion rings: recv our filled buffers back */
625 	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
626 	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
627 	    &ring_mem_ptr))
628 		goto cleanup;
629 	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024,
630 	    sc->sc_shared_version >= 7 ? 24 : 16,
631 	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
632 	    &ring_mem_ptr))
633 		goto cleanup;
634 	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512,
635 	    sc->sc_shared_version >= 7 ? 40 : 32,
636 	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
637 	    &ring_mem_ptr))
638 		goto cleanup;
639 
640 	/* Dynamic TX rings for actual data */
641 	sc->sc_flowrings = malloc(sc->sc_max_flowrings *
642 	    sizeof(struct bwfm_pci_msgring), M_DEVBUF, M_WAITOK | M_ZERO);
643 	for (i = 0; i < sc->sc_max_flowrings; i++) {
644 		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
645 		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
646 		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
647 	}
648 
649 	/* Scratch and ring update buffers for firmware */
650 	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
651 	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
652 		goto cleanup;
653 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
654 	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
655 	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
656 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
657 	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
658 	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
659 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
660 	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
661 	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);
662 
663 	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
664 	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
665 		goto cleanup;
666 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
667 	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
668 	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
669 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
670 	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
671 	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
672 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
673 	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
674 	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);
675 
676 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
677 	bwfm_pci_intr_enable(sc);
678 	bwfm_pci_hostready(sc);
679 
680 	/* Maps RX mbufs to a packet id and back. */
681 	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
682 	sc->sc_rx_pkts.pkts = malloc(BWFM_NUM_RX_PKTIDS *
683 	    sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
684 	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
685 		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_CTL_PKT_SIZE,
686 		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_CTL_PKT_SIZE, 0, BUS_DMA_WAITOK,
687 		    &sc->sc_rx_pkts.pkts[i].bb_map);
688 
689 	/* Maps TX mbufs to a packet id and back. */
690 	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
691 	sc->sc_tx_pkts.pkts = malloc(BWFM_NUM_TX_PKTIDS
692 	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
693 	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
694 		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
695 		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
696 		    &sc->sc_tx_pkts.pkts[i].bb_map);
697 	sc->sc_tx_pkts_full = 0;
698 
699 	/* Maps IOCTL mbufs to a packet id and back. */
700 	sc->sc_ioctl_pkts.npkt = BWFM_NUM_IOCTL_PKTIDS;
701 	sc->sc_ioctl_pkts.pkts = malloc(BWFM_NUM_IOCTL_PKTIDS
702 	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
703 	for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS; i++)
704 		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
705 		    BWFM_NUM_IOCTL_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
706 		    &sc->sc_ioctl_pkts.pkts[i].bb_map);
707 
708 	/*
709 	 * For whatever reason, could also be a bug somewhere in this
710 	 * driver, the firmware needs a bunch of RX buffers otherwise
711 	 * it won't send any RX complete messages.
712 	 */
713 	if_rxr_init(&sc->sc_rxbuf_ring, min(256, sc->sc_max_rxbufpost),
714 	    sc->sc_max_rxbufpost);
715 	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
716 	if_rxr_init(&sc->sc_event_ring, 8, 8);
717 	bwfm_pci_fill_rx_rings(sc);
718 
719 	TAILQ_INIT(&sc->sc_ioctlq);
720 
721 #ifdef BWFM_DEBUG
722 	sc->sc_console_readidx = 0;
723 	bwfm_pci_debug_console(sc);
724 #endif
725 
726 	sc->sc_initialized = 1;
727 	return 0;
728 
729 cleanup:
730 	if (sc->sc_ringupd_buf)
731 		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
732 	if (sc->sc_scratch_buf)
733 		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
734 	if (sc->sc_rx_complete.ring)
735 		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
736 	if (sc->sc_tx_complete.ring)
737 		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
738 	if (sc->sc_ctrl_complete.ring)
739 		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
740 	if (sc->sc_rxpost_submit.ring)
741 		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
742 	if (sc->sc_ctrl_submit.ring)
743 		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
744 	if (sc->sc_dma_idx_buf)
745 		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
746 	return 1;
747 }
748 
/*
 * Copy the firmware image (and trailing NVRAM, if any) into the
 * dongle's RAM, start the ARM core, and wait for the firmware to
 * publish the address of its shared info area in the last word of
 * RAM.  Returns 0 on success with sc_shared_address set, 1 on error.
 */
int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size,
    const u_char *nvram, size_t nvlen)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t shared, written;
	int i;

	/* BCM43602: zero the ARM CR4 bank PDAs before loading. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	/* Copy the image to the start of device RAM via the TCM window. */
	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	/* NVRAM goes at the very end of device RAM. */
	if (nvram) {
		for (i = 0; i < nvlen; i++)
			bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
			    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize
			    - nvlen  + i, nvram[i]);
	}

	/* Remember the value we wrote so we can detect the change below. */
	written = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(uint32_t *)ucode);

	/* Poll (100 x 50ms = 5s) for the firmware to update the word. */
	for (i = 0; i < 100; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared != written)
			break;
	}
	if (shared == written) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}
	/* The published value must be a pointer into device RAM. */
	if (shared < bwfm->sc_chip.ch_rambase ||
	    shared >= bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize) {
		printf("%s: invalid shared RAM address 0x%08x\n", DEVNAME(sc),
		    shared);
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}
816 
817 int
818 bwfm_pci_detach(struct device *self, int flags)
819 {
820 	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
821 
822 	bwfm_detach(&sc->sc_sc, flags);
823 	bwfm_pci_cleanup(sc);
824 
825 	return 0;
826 }
827 
/*
 * Release all resources set up by bwfm_pci_preinit() so the device can
 * be reinitialized from scratch (detach, or failed resume).
 *
 * NOTE(review): this assumes preinit completed.  If it were reached
 * with sc_initialized == 0 the pkts arrays would be NULL and the
 * dmamap loops below would fault -- confirm detach cannot run before
 * a successful preinit.
 */
void
bwfm_pci_cleanup(struct bwfm_pci_softc *sc)
{
	int i;

	/* RX packet table: destroy maps, free any still-loaded mbufs. */
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_pkts.pkts[i].bb_map);
		if (sc->sc_rx_pkts.pkts[i].bb_m)
			m_freem(sc->sc_rx_pkts.pkts[i].bb_m);
	}
	free(sc->sc_rx_pkts.pkts, M_DEVBUF, BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf));

	/* Same for the TX packet table. */
	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_pkts.pkts[i].bb_map);
		if (sc->sc_tx_pkts.pkts[i].bb_m)
			m_freem(sc->sc_tx_pkts.pkts[i].bb_m);
	}
	free(sc->sc_tx_pkts.pkts, M_DEVBUF, BWFM_NUM_TX_PKTIDS *
	    sizeof(struct bwfm_pci_buf));

	/* And the ioctl packet table. */
	for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS; i++) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_ioctl_pkts.pkts[i].bb_map);
		if (sc->sc_ioctl_pkts.pkts[i].bb_m)
			m_freem(sc->sc_ioctl_pkts.pkts[i].bb_m);
	}
	free(sc->sc_ioctl_pkts.pkts, M_DEVBUF, BWFM_NUM_IOCTL_PKTIDS *
	    sizeof(struct bwfm_pci_buf));

	/* Only OPEN/OPENING flowrings carry ring memory. */
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (sc->sc_flowrings[i].status >= RING_OPEN)
			bwfm_pci_dmamem_free(sc, sc->sc_flowrings[i].ring);
	}
	free(sc->sc_flowrings, M_DEVBUF, sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring));

	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf) {
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
		sc->sc_dma_idx_buf = NULL;
	}

	/* Allow bwfm_pci_preinit() to run again. */
	sc->sc_initialized = 0;
}
878 
/*
 * Power management: on suspend, tell the firmware to enter D3 and wait
 * for the mailbox acknowledgement; on resume, try to wake it with D0
 * and fall back to a full re-initialization if the device did not
 * survive the sleep.
 */
int
bwfm_pci_activate(struct device *self, int act)
{
	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
	struct bwfm_softc *bwfm = (void *)sc;
	int error = 0;

	switch (act) {
	case DVACT_QUIESCE:
		error = bwfm_activate(bwfm, act);
		if (error)
			return error;
		if (sc->sc_initialized) {
			/* Ask the firmware to go to sleep (D3). */
			sc->sc_mbdata_done = 0;
			error = bwfm_pci_send_mb_data(sc,
			    BWFM_PCI_H2D_HOST_D3_INFORM);
			if (error)
				return error;
			/* Wait up to 2s for the mailbox acknowledgement. */
			tsleep_nsec(&sc->sc_mbdata_done, PCATCH,
			    DEVNAME(sc), SEC_TO_NSEC(2));
			if (!sc->sc_mbdata_done)
				return ETIMEDOUT;
		}
		break;
	case DVACT_WAKEUP:
		if (sc->sc_initialized) {
			/* If device can't be resumed, re-init. */
			if (bwfm_pci_intmask(sc) == 0 ||
			    bwfm_pci_send_mb_data(sc,
			    BWFM_PCI_H2D_HOST_D0_INFORM) != 0) {
				bwfm_cleanup(bwfm);
				bwfm_pci_cleanup(sc);
			}
		}
		error = bwfm_activate(bwfm, act);
		if (error)
			return error;
		break;
	default:
		break;
	}

	return 0;
}
923 
924 #if defined(__HAVE_FDT)
/*
 * Read the BCM4378's OTP (one-time programmable) memory through the GCI
 * core and feed every tuple found in it to bwfm_pci_process_otp_tuple().
 * Returns 0 on success (or when the chip has no OTP we parse), 1 if the
 * GCI core cannot be found.
 */
int
bwfm_pci_read_otp(struct bwfm_pci_softc *sc)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint8_t otp[BWFM_OTP_SIZE];
	int i;

	/* Only the 4378 carries OTP data this driver knows how to parse. */
	if (bwfm->sc_chip.ch_chip != BRCM_CC_4378_CHIP_ID)
		return 0;

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_GCI);
	if (core == NULL)
		return 1;

	/* Copy the OTP contents out of the chip one 32-bit word at a time. */
	for (i = 0; i < (sizeof(otp) / sizeof(uint32_t)); i++)
		((uint32_t *)otp)[i] = bwfm_pci_buscore_read(bwfm,
		    core->co_base + BWFM_OTP_4378_BASE + i * sizeof(uint32_t));

	/*
	 * Walk the TLV stream: otp[i] is the tuple type, otp[i + 1] the
	 * payload size, the payload starts at otp[i + 2].
	 *
	 * NOTE(review): the advance (and the bounds check) only account
	 * for the payload length, not the 2-byte type/size header, so the
	 * last two payload bytes are re-read as the next header -- confirm
	 * against the OTP tuple format before changing.
	 */
	for (i = 0; i < BWFM_OTP_SIZE - 1; ) {
		if (otp[i + 0] == 0) {
			i++;
			continue;
		}
		if (i + otp[i + 1] > BWFM_OTP_SIZE)
			break;
		bwfm_pci_process_otp_tuple(sc, otp[i + 0], otp[i + 1],
		    &otp[i + 2]);
		i += otp[i + 1];
	}

	return 0;
}
958 
/*
 * Decode a single OTP tuple.  Type 0x15 ("system vendor OTP") carries
 * space-separated "X=value" tokens identifying the board (chip revision,
 * module, module revision, vendor); these are combined with the
 * device-tree "module-wlan0" property to print the firmware identifier
 * this board expects.  Other tuple types are only logged.
 */
void
bwfm_pci_process_otp_tuple(struct bwfm_pci_softc *sc, uint8_t type, uint8_t size,
    uint8_t *data)
{
	struct bwfm_softc *bwfm = (void *)sc;
	char chiprev[8] = "", module[8] = "", modrev[8] = "", vendor[8] = "", chip[8] = "";
	char product[16] = "unknown";
	int node, len;

	switch (type) {
	case 0x15: /* system vendor OTP */
		DPRINTF(("%s: system vendor OTP\n", DEVNAME(sc)));
		if (size < sizeof(uint32_t))
			return;
		/* Payload must start with the 0x00000008 header word. */
		if (data[0] != 0x08 || data[1] != 0x00 ||
		    data[2] != 0x00 || data[3] != 0x00)
			return;
		size -= sizeof(uint32_t);
		data += sizeof(uint32_t);
		/* Parse "X=value" tokens; 0xff marks the end of the data. */
		while (size) {
			/* reached end */
			if (data[0] == 0xff)
				break;
			/* Find the end of the current token. */
			for (len = 0; len < size; len++)
				if (data[len] == 0x00 || data[len] == ' ' ||
				    data[len] == 0xff)
					break;
			if (len < 3 || len > 9) /* X=abcdef */
				goto next;
			if (data[1] != '=')
				goto next;
			/* NULL-terminate string */
			if (data[len] == ' ')
				data[len] = '\0';
			/* Stash the value according to its one-letter key. */
			switch (data[0]) {
			case 's':
				strlcpy(chiprev, &data[2], sizeof(chiprev));
				break;
			case 'M':
				strlcpy(module, &data[2], sizeof(module));
				break;
			case 'm':
				strlcpy(modrev, &data[2], sizeof(modrev));
				break;
			case 'V':
				strlcpy(vendor, &data[2], sizeof(vendor));
				break;
			}
next:
			/* skip content */
			data += len;
			size -= len;
			/* skip spacer tag */
			if (size) {
				data++;
				size--;
			}
		}
		/* Newer chip ids (> 40000) print in decimal, older in hex. */
		snprintf(chip, sizeof(chip),
		    bwfm->sc_chip.ch_chip > 40000 ? "%05d" : "%04x",
		    bwfm->sc_chip.ch_chip);
		node = OF_finddevice("/chosen");
		if (node != -1)
			OF_getprop(node, "module-wlan0", product, sizeof(product));
		printf("%s: firmware C-%s%s%s/P-%s_M-%s_V-%s__m-%s\n",
		    DEVNAME(sc), chip,
		    *chiprev ? "__s-" : "", *chiprev ? chiprev : "",
		    product, module, vendor, modrev);
		break;
	case 0x80: /* Broadcom CIS */
		DPRINTF(("%s: Broadcom CIS\n", DEVNAME(sc)));
		break;
	default:
		DPRINTF(("%s: unknown OTP tuple\n", DEVNAME(sc)));
		break;
	}
}
1036 #endif
1037 
1038 /* DMA code */
1039 struct bwfm_pci_dmamem *
1040 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
1041 {
1042 	struct bwfm_pci_dmamem *bdm;
1043 	int nsegs;
1044 
1045 	bdm = malloc(sizeof(*bdm), M_DEVBUF, M_WAITOK | M_ZERO);
1046 	bdm->bdm_size = size;
1047 
1048 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1049 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
1050 		goto bdmfree;
1051 
1052 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
1053 	    &nsegs, BUS_DMA_WAITOK) != 0)
1054 		goto destroy;
1055 
1056 	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
1057 	    &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
1058 		goto free;
1059 
1060 	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
1061 	    NULL, BUS_DMA_WAITOK) != 0)
1062 		goto unmap;
1063 
1064 	bzero(bdm->bdm_kva, size);
1065 
1066 	return (bdm);
1067 
1068 unmap:
1069 	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
1070 free:
1071 	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
1072 destroy:
1073 	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
1074 bdmfree:
1075 	free(bdm, M_DEVBUF, sizeof(*bdm));
1076 
1077 	return (NULL);
1078 }
1079 
/*
 * Release a buffer obtained from bwfm_pci_dmamem_alloc(): unmap the
 * KVA, free the backing segment, destroy the map and free the handle.
 */
void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	free(bdm, M_DEVBUF, sizeof(*bdm));
}
1088 
1089 /*
1090  * We need a simple mapping from a packet ID to mbufs, because when
1091  * a transfer completed, we only know the ID so we have to look up
1092  * the memory for the ID.  This simply looks for an empty slot.
1093  */
1094 int
1095 bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
1096 {
1097 	int i, idx;
1098 
1099 	idx = pkts->last + 1;
1100 	for (i = 0; i < pkts->npkt; i++) {
1101 		if (idx == pkts->npkt)
1102 			idx = 0;
1103 		if (pkts->pkts[idx].bb_m == NULL)
1104 			return 0;
1105 		idx++;
1106 	}
1107 	return ENOBUFS;
1108 }
1109 
1110 int
1111 bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
1112     struct mbuf *m, uint32_t *pktid, paddr_t *paddr)
1113 {
1114 	int i, idx;
1115 
1116 	idx = pkts->last + 1;
1117 	for (i = 0; i < pkts->npkt; i++) {
1118 		if (idx == pkts->npkt)
1119 			idx = 0;
1120 		if (pkts->pkts[idx].bb_m == NULL) {
1121 			if (bus_dmamap_load_mbuf(sc->sc_dmat,
1122 			    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0) {
1123 				if (m_defrag(m, M_DONTWAIT))
1124 					return EFBIG;
1125 				if (bus_dmamap_load_mbuf(sc->sc_dmat,
1126 				    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0)
1127 					return EFBIG;
1128 			}
1129 			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
1130 			    0, pkts->pkts[idx].bb_map->dm_mapsize,
1131 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1132 			pkts->last = idx;
1133 			pkts->pkts[idx].bb_m = m;
1134 			*pktid = idx;
1135 			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
1136 			return 0;
1137 		}
1138 		idx++;
1139 	}
1140 	return ENOBUFS;
1141 }
1142 
1143 struct mbuf *
1144 bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
1145     uint32_t pktid)
1146 {
1147 	struct mbuf *m;
1148 
1149 	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
1150 		return NULL;
1151 	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
1152 	    pkts->pkts[pktid].bb_map->dm_mapsize,
1153 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1154 	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
1155 	m = pkts->pkts[pktid].bb_m;
1156 	pkts->pkts[pktid].bb_m = NULL;
1157 	return m;
1158 }
1159 
/*
 * Top up every host-supplied receive buffer pool: the data-path rx
 * buffers plus the ioctl-response and event buffer rings.
 */
void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}
1169 
1170 void
1171 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
1172     uint32_t msgtype)
1173 {
1174 	struct msgbuf_rx_ioctl_resp_or_event *req;
1175 	struct mbuf *m;
1176 	uint32_t pktid;
1177 	paddr_t paddr;
1178 	int s, slots;
1179 
1180 	s = splnet();
1181 	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
1182 		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
1183 			break;
1184 		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1185 		if (req == NULL)
1186 			break;
1187 		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_CTL_PKT_SIZE);
1188 		if (m == NULL) {
1189 			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
1190 			break;
1191 		}
1192 		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_CTL_PKT_SIZE;
1193 		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
1194 			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
1195 			m_freem(m);
1196 			break;
1197 		}
1198 		memset(req, 0, sizeof(*req));
1199 		req->msg.msgtype = msgtype;
1200 		req->msg.request_id = htole32(pktid);
1201 		req->host_buf_len = htole16(MSGBUF_MAX_CTL_PKT_SIZE);
1202 		req->host_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
1203 		req->host_buf_addr.low_addr = htole32(paddr & 0xffffffff);
1204 		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1205 	}
1206 	if_rxr_put(rxring, slots);
1207 	splx(s);
1208 }
1209 
1210 void
1211 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
1212 {
1213 	struct msgbuf_rx_bufpost *req;
1214 	struct mbuf *m;
1215 	uint32_t pktid;
1216 	paddr_t paddr;
1217 	int s, slots;
1218 
1219 	s = splnet();
1220 	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
1221 	    slots > 0; slots--) {
1222 		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
1223 			break;
1224 		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
1225 		if (req == NULL)
1226 			break;
1227 		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_PKT_SIZE);
1228 		if (m == NULL) {
1229 			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
1230 			break;
1231 		}
1232 		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
1233 		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
1234 			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
1235 			m_freem(m);
1236 			break;
1237 		}
1238 		memset(req, 0, sizeof(*req));
1239 		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
1240 		req->msg.request_id = htole32(pktid);
1241 		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
1242 		req->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
1243 		req->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
1244 		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
1245 	}
1246 	if_rxr_put(&sc->sc_rxbuf_ring, slots);
1247 	splx(s);
1248 }
1249 
/*
 * Initialize one of the common (submit/complete) message rings: record
 * where its read/write indices live, reset both pointers, allocate the
 * DMA backing store and publish the ring's DMA address and geometry to
 * the device through the ring_mem area in TCM.  *ring_mem is advanced
 * past this ring's descriptor for the next caller.
 */
int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	/* Tell the firmware where the ring lives and how big it is. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}
1280 
/*
 * Initialize a TX flowring: reset its pointers and allocate the DMA
 * backing store.  Unlike the common rings, the ring address is not
 * written to TCM here -- it is passed to the firmware in the
 * FLOW_RING_CREATE request instead.
 */
int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}
1297 
1298 /* Ring helpers */
1299 void
1300 bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
1301     struct bwfm_pci_msgring *ring)
1302 {
1303 	struct bwfm_softc *bwfm = (void *)sc;
1304 
1305 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
1306 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1307 		    BWFM_PCI_64_PCIE2REG_H2D_MAILBOX_0, 1);
1308 	else
1309 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1310 		    BWFM_PCI_PCIE2REG_H2D_MAILBOX_0, 1);
1311 }
1312 
1313 void
1314 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
1315     struct bwfm_pci_msgring *ring)
1316 {
1317 	if (sc->sc_dma_idx_sz == 0) {
1318 		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
1319 		    sc->sc_tcm_ioh, ring->r_idx_addr);
1320 	} else {
1321 		bus_dmamap_sync(sc->sc_dmat,
1322 		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
1323 		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1324 		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1325 		    + ring->r_idx_addr);
1326 	}
1327 }
1328 
1329 void
1330 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
1331     struct bwfm_pci_msgring *ring)
1332 {
1333 	if (sc->sc_dma_idx_sz == 0) {
1334 		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
1335 		    sc->sc_tcm_ioh, ring->w_idx_addr);
1336 	} else {
1337 		bus_dmamap_sync(sc->sc_dmat,
1338 		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
1339 		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1340 		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1341 		    + ring->w_idx_addr);
1342 	}
1343 }
1344 
1345 void
1346 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
1347     struct bwfm_pci_msgring *ring)
1348 {
1349 	if (sc->sc_dma_idx_sz == 0) {
1350 		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1351 		    ring->r_idx_addr, ring->r_ptr);
1352 	} else {
1353 		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1354 		    + ring->r_idx_addr) = ring->r_ptr;
1355 		bus_dmamap_sync(sc->sc_dmat,
1356 		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
1357 		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1358 	}
1359 }
1360 
1361 void
1362 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
1363     struct bwfm_pci_msgring *ring)
1364 {
1365 	if (sc->sc_dma_idx_sz == 0) {
1366 		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1367 		    ring->w_idx_addr, ring->w_ptr);
1368 	} else {
1369 		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1370 		    + ring->w_idx_addr) = ring->w_ptr;
1371 		bus_dmamap_sync(sc->sc_dmat,
1372 		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
1373 		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1374 	}
1375 }
1376 
1377 /*
1378  * Retrieve a free descriptor to put new stuff in, but don't commit
1379  * to it yet so we can rollback later if any error occurs.
1380  */
1381 void *
1382 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
1383     struct bwfm_pci_msgring *ring)
1384 {
1385 	int available;
1386 	char *ret;
1387 
1388 	bwfm_pci_ring_update_rptr(sc, ring);
1389 
1390 	if (ring->r_ptr > ring->w_ptr)
1391 		available = ring->r_ptr - ring->w_ptr;
1392 	else
1393 		available = ring->r_ptr + (ring->nitem - ring->w_ptr);
1394 
1395 	if (available <= 1)
1396 		return NULL;
1397 
1398 	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
1399 	ring->w_ptr += 1;
1400 	if (ring->w_ptr == ring->nitem)
1401 		ring->w_ptr = 0;
1402 	return ret;
1403 }
1404 
1405 void *
1406 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
1407     struct bwfm_pci_msgring *ring, int count, int *avail)
1408 {
1409 	int available;
1410 	char *ret;
1411 
1412 	bwfm_pci_ring_update_rptr(sc, ring);
1413 
1414 	if (ring->r_ptr > ring->w_ptr)
1415 		available = ring->r_ptr - ring->w_ptr;
1416 	else
1417 		available = ring->r_ptr + (ring->nitem - ring->w_ptr);
1418 
1419 	if (available <= 1)
1420 		return NULL;
1421 
1422 	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
1423 	*avail = min(count, available - 1);
1424 	if (*avail + ring->w_ptr > ring->nitem)
1425 		*avail = ring->nitem - ring->w_ptr;
1426 	ring->w_ptr += *avail;
1427 	if (ring->w_ptr == ring->nitem)
1428 		ring->w_ptr = 0;
1429 	return ret;
1430 }
1431 
1432 /*
1433  * Read number of descriptors available (submitted by the firmware)
1434  * and retrieve pointer to first descriptor.
1435  */
1436 void *
1437 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
1438     struct bwfm_pci_msgring *ring, int *avail)
1439 {
1440 	bwfm_pci_ring_update_wptr(sc, ring);
1441 
1442 	if (ring->w_ptr >= ring->r_ptr)
1443 		*avail = ring->w_ptr - ring->r_ptr;
1444 	else
1445 		*avail = ring->nitem - ring->r_ptr;
1446 
1447 	if (*avail == 0)
1448 		return NULL;
1449 
1450 	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
1451 	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
1452 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1453 	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
1454 }
1455 
1456 /*
1457  * Let firmware know we read N descriptors.
1458  */
/*
 * Let firmware know we read N descriptors: advance our read pointer
 * (wrapping at the end of the ring) and publish it.
 */
void
bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	ring->r_ptr += nitem;
	if (ring->r_ptr == ring->nitem)
		ring->r_ptr = 0;
	bwfm_pci_ring_write_rptr(sc, ring);
}
1468 
1469 /*
1470  * Let firmware know that we submitted some descriptors.
1471  */
/*
 * Let firmware know that we submitted some descriptors: flush the ring
 * contents to memory, publish our write pointer and ring the doorbell.
 */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}
1482 
1483 /*
1484  * Rollback N descriptors in case we don't actually want
1485  * to commit to it.
1486  */
1487 void
1488 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
1489     struct bwfm_pci_msgring *ring, int nitem)
1490 {
1491 	if (ring->w_ptr == 0)
1492 		ring->w_ptr = ring->nitem - nitem;
1493 	else
1494 		ring->w_ptr -= nitem;
1495 }
1496 
1497 /*
1498  * Foreach written descriptor on the ring, pass the descriptor to
1499  * a message handler and let the firmware know we handled it.
1500  */
1501 void
1502 bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
1503     struct mbuf_list *ml)
1504 {
1505 	void *buf;
1506 	int avail, processed;
1507 
1508 again:
1509 	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
1510 	if (buf == NULL)
1511 		return;
1512 
1513 	processed = 0;
1514 	while (avail) {
1515 		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset, ml);
1516 		buf += ring->itemsz;
1517 		processed++;
1518 		if (processed == 48) {
1519 			bwfm_pci_ring_read_commit(sc, ring, processed);
1520 			processed = 0;
1521 		}
1522 		avail--;
1523 	}
1524 	if (processed)
1525 		bwfm_pci_ring_read_commit(sc, ring, processed);
1526 	if (ring->r_ptr == 0)
1527 		goto again;
1528 }
1529 
1530 void
1531 bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf, struct mbuf_list *ml)
1532 {
1533 	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
1534 	struct msgbuf_ioctl_resp_hdr *resp;
1535 	struct msgbuf_tx_status *tx;
1536 	struct msgbuf_rx_complete *rx;
1537 	struct msgbuf_rx_event *event;
1538 	struct msgbuf_common_hdr *msg;
1539 	struct msgbuf_flowring_create_resp *fcr;
1540 	struct msgbuf_flowring_delete_resp *fdr;
1541 	struct bwfm_pci_msgring *ring;
1542 	struct mbuf *m;
1543 	int flowid;
1544 
1545 	msg = (struct msgbuf_common_hdr *)buf;
1546 	switch (msg->msgtype)
1547 	{
1548 	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
1549 		fcr = (struct msgbuf_flowring_create_resp *)buf;
1550 		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
1551 		if (flowid < 2)
1552 			break;
1553 		flowid -= 2;
1554 		if (flowid >= sc->sc_max_flowrings)
1555 			break;
1556 		ring = &sc->sc_flowrings[flowid];
1557 		if (ring->status != RING_OPENING)
1558 			break;
1559 		if (fcr->compl_hdr.status) {
1560 			printf("%s: failed to open flowring %d\n",
1561 			    DEVNAME(sc), flowid);
1562 			ring->status = RING_CLOSED;
1563 			if (ring->m) {
1564 				m_freem(ring->m);
1565 				ring->m = NULL;
1566 			}
1567 			ifq_restart(&ifp->if_snd);
1568 			break;
1569 		}
1570 		ring->status = RING_OPEN;
1571 		if (ring->m != NULL) {
1572 			m = ring->m;
1573 			ring->m = NULL;
1574 			if (bwfm_pci_txdata(&sc->sc_sc, m))
1575 				m_freem(ring->m);
1576 		}
1577 		ifq_restart(&ifp->if_snd);
1578 		break;
1579 	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
1580 		fdr = (struct msgbuf_flowring_delete_resp *)buf;
1581 		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
1582 		if (flowid < 2)
1583 			break;
1584 		flowid -= 2;
1585 		if (flowid >= sc->sc_max_flowrings)
1586 			break;
1587 		ring = &sc->sc_flowrings[flowid];
1588 		if (ring->status != RING_CLOSING)
1589 			break;
1590 		if (fdr->compl_hdr.status) {
1591 			printf("%s: failed to delete flowring %d\n",
1592 			    DEVNAME(sc), flowid);
1593 			break;
1594 		}
1595 		bwfm_pci_dmamem_free(sc, ring->ring);
1596 		ring->status = RING_CLOSED;
1597 		break;
1598 	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
1599 		m = bwfm_pci_pktid_free(sc, &sc->sc_ioctl_pkts,
1600 		    letoh32(msg->request_id));
1601 		if (m == NULL)
1602 			break;
1603 		m_freem(m);
1604 		break;
1605 	case MSGBUF_TYPE_IOCTL_CMPLT:
1606 		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
1607 		bwfm_pci_msgbuf_rxioctl(sc, resp);
1608 		if_rxr_put(&sc->sc_ioctl_ring, 1);
1609 		bwfm_pci_fill_rx_rings(sc);
1610 		break;
1611 	case MSGBUF_TYPE_WL_EVENT:
1612 		event = (struct msgbuf_rx_event *)buf;
1613 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1614 		    letoh32(event->msg.request_id));
1615 		if (m == NULL)
1616 			break;
1617 		m_adj(m, sc->sc_rx_dataoffset);
1618 		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
1619 		bwfm_rx(&sc->sc_sc, m, ml);
1620 		if_rxr_put(&sc->sc_event_ring, 1);
1621 		bwfm_pci_fill_rx_rings(sc);
1622 		break;
1623 	case MSGBUF_TYPE_TX_STATUS:
1624 		tx = (struct msgbuf_tx_status *)buf;
1625 		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
1626 		    letoh32(tx->msg.request_id) - 1);
1627 		if (m == NULL)
1628 			break;
1629 		m_freem(m);
1630 		if (sc->sc_tx_pkts_full) {
1631 			sc->sc_tx_pkts_full = 0;
1632 			ifq_restart(&ifp->if_snd);
1633 		}
1634 		break;
1635 	case MSGBUF_TYPE_RX_CMPLT:
1636 		rx = (struct msgbuf_rx_complete *)buf;
1637 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1638 		    letoh32(rx->msg.request_id));
1639 		if (m == NULL)
1640 			break;
1641 		if (letoh16(rx->data_offset))
1642 			m_adj(m, letoh16(rx->data_offset));
1643 		else if (sc->sc_rx_dataoffset)
1644 			m_adj(m, sc->sc_rx_dataoffset);
1645 		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
1646 		bwfm_rx(&sc->sc_sc, m, ml);
1647 		if_rxr_put(&sc->sc_rxbuf_ring, 1);
1648 		bwfm_pci_fill_rx_rings(sc);
1649 		break;
1650 	default:
1651 		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
1652 		break;
1653 	}
1654 }
1655 
1656 /* Bus core helpers */
1657 void
1658 bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
1659 {
1660 	struct bwfm_softc *bwfm = (void *)sc;
1661 	struct bwfm_core *core;
1662 
1663 	core = bwfm_chip_get_core(bwfm, id);
1664 	if (core == NULL) {
1665 		printf("%s: could not find core to select", DEVNAME(sc));
1666 		return;
1667 	}
1668 
1669 	pci_conf_write(sc->sc_pc, sc->sc_tag,
1670 	    BWFM_PCI_BAR0_WINDOW, core->co_base);
1671 	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
1672 	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
1673 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1674 		    BWFM_PCI_BAR0_WINDOW, core->co_base);
1675 }
1676 
1677 uint32_t
1678 bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
1679 {
1680 	struct bwfm_pci_softc *sc = (void *)bwfm;
1681 	uint32_t page, offset;
1682 
1683 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1684 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1685 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1686 	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
1687 }
1688 
1689 void
1690 bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
1691 {
1692 	struct bwfm_pci_softc *sc = (void *)bwfm;
1693 	uint32_t page, offset;
1694 
1695 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1696 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1697 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1698 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
1699 }
1700 
/* Nothing to prepare before a bus core reset on PCI. */
int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}
1706 
/*
 * Reset the chip through the chipcommon watchdog.  ASPM is disabled
 * around the reset and restored afterwards; on PCIe core revisions
 * <= 13 a set of config space registers is additionally read back and
 * rewritten.  Any interrupt left pending by the reset is acknowledged.
 */
int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	/* Save the link control register and disable ASPM. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	/* Fire the watchdog and give the chip 100ms to reset. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	/* Restore the previous ASPM setting. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		/* Config registers rewritten after reset on old cores. */
		uint16_t cfg_offset[] = {
		    BWFM_PCI_CFGREG_STATUS_CMD,
		    BWFM_PCI_CFGREG_PM_CSR,
		    BWFM_PCI_CFGREG_MSI_CAP,
		    BWFM_PCI_CFGREG_MSI_ADDR_L,
		    BWFM_PCI_CFGREG_MSI_ADDR_H,
		    BWFM_PCI_CFGREG_MSI_DATA,
		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
		    BWFM_PCI_CFGREG_RBAR_CTRL,
		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		/* Read each register through the indirect window and
		 * write the same value back. */
		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}

	/* Acknowledge any interrupt the reset may have left pending. */
	reg = bwfm_pci_intr_status(sc);
	if (reg != 0xffffffff)
		bwfm_pci_intr_ack(sc, reg);

	return 0;
}
1764 
/* Start the firmware by writing its reset vector to the start of TCM. */
void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}
1771 
/*
 * Map an 802.1D priority (as returned by ieee80211_classify) to one of
 * the four TX FIFOs used for flowring selection.
 */
static int bwfm_pci_prio2fifo[8] = {
	0, /* best effort */
	1, /* IPTOS_PREC_IMMEDIATE */
	1, /* IPTOS_PREC_PRIORITY */
	0, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};
1782 
/*
 * Find the open flowring a packet belongs on.  A start index is
 * derived from the TX FIFO (and, in AP mode, the destination MAC's
 * last byte) and used as the origin of a linear probe over the
 * flowring table.  Returns the flowring index, or -1 when no matching
 * ring is open yet.
 *
 * NOTE(review): the unsupported-opmode path returns ENOBUFS (positive)
 * while the not-found path returns -1 -- confirm callers treat every
 * non-negative value below sc_max_flowrings as a valid index.
 */
int
bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
#ifndef IEEE80211_STA_ONLY
	uint8_t *da = mtod(m, uint8_t *);
#endif
	int flowid, prio, fifo;
	int i, found;

	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		/* Multicast frames all share the broadcast rings. */
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return ENOBUFS;
	}

	/* Linear probe for an open ring matching fifo (and MAC in AP mode). */
	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (ic->ic_opmode == IEEE80211_M_STA &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo) {
			found = 1;
			break;
		}
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo &&
		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
			found = 1;
			break;
		}
#endif
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	if (found)
		return flowid;

	return -1;
}
1839 
/*
 * Begin opening a new flowring for a packet that has no ring yet:
 * pick a free table slot (linear probe from the same start index that
 * bwfm_pci_flowring_lookup() uses), mark it RING_OPENING and queue the
 * actual create request asynchronously via bwfm_do_async(), which
 * invokes bwfm_pci_flowring_create_cb().  The packet itself travels in
 * the command and is transmitted once the firmware confirms the ring.
 */
void
bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create cmd;
#ifndef IEEE80211_STA_ONLY
	uint8_t *da = mtod(m, uint8_t *);
#endif
	struct bwfm_pci_msgring *ring;
	int flowid, prio, fifo;
	int i, found;

	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		/* Multicast frames all share the broadcast rings. */
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return;
	}

	/* Linear probe for a closed (free) flowring slot. */
	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[flowid];
		if (ring->status == RING_CLOSED) {
			ring->status = RING_OPENING;
			found = 1;
			break;
		}
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	/*
	 * We cannot recover from that so far.  Only a stop/init
	 * cycle can revive this if it ever happens at all.
	 */
	if (!found) {
		printf("%s: no flowring available\n", DEVNAME(sc));
		return;
	}

	cmd.m = m;
	cmd.prio = prio;
	cmd.flowid = flowid;
	bwfm_do_async(&sc->sc_sc, bwfm_pci_flowring_create_cb, &cmd, sizeof(cmd));
}
1898 
/*
 * Async continuation of bwfm_pci_flowring_create(): allocate the
 * flowring's DMA storage (512 items of 48 bytes) and send the
 * FLOW_RING_CREATE request on the control submit ring.  The pending
 * packet is parked in ring->m until the firmware's completion arrives
 * in bwfm_pci_msg_rx().
 *
 * NOTE(review): on the error returns below, cmd->m is neither freed
 * nor parked and the ring stays in RING_OPENING -- looks like a leak
 * plus a permanently stuck slot; confirm before relying on these paths.
 */
void
bwfm_pci_flowring_create_cb(struct bwfm_softc *bwfm, void *arg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
#ifndef IEEE80211_STA_ONLY
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
#endif
	struct bwfm_cmd_flowring_create *cmd = arg;
	struct msgbuf_tx_flowring_create_req *req;
	struct bwfm_pci_msgring *ring;
	uint8_t *da, *sa;
	int s;

	/* Destination and source MAC from the parked frame's header. */
	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;

	ring = &sc->sc_flowrings[cmd->flowid];
	if (ring->status != RING_OPENING) {
		printf("%s: flowring not opening\n", DEVNAME(sc));
		return;
	}

	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		return;
	}

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		splx(s);
		return;
	}

	/* Record ring parameters; already RING_OPENING, set again here. */
	ring->status = RING_OPENING;
	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
	ring->m = cmd->m;
	memcpy(ring->mac, da, ETHER_ADDR_LEN);
#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
#endif

	/* Firmware flowring ids start at 2. */
	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->tid = bwfm_pci_prio2fifo[cmd->prio];
	req->flow_ring_id = letoh16(cmd->flowid + 2);
	memcpy(req->da, da, ETHER_ADDR_LEN);
	memcpy(req->sa, sa, ETHER_ADDR_LEN);
	req->flow_ring_addr.high_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	req->flow_ring_addr.low_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	req->max_items = letoh16(512);
	req->len_item = letoh16(48);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);
}
1960 
1961 void
1962 bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
1963 {
1964 	struct msgbuf_tx_flowring_delete_req *req;
1965 	struct bwfm_pci_msgring *ring;
1966 	int s;
1967 
1968 	ring = &sc->sc_flowrings[flowid];
1969 	if (ring->status != RING_OPEN) {
1970 		printf("%s: flowring not open\n", DEVNAME(sc));
1971 		return;
1972 	}
1973 
1974 	s = splnet();
1975 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1976 	if (req == NULL) {
1977 		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1978 		splx(s);
1979 		return;
1980 	}
1981 
1982 	ring->status = RING_CLOSING;
1983 
1984 	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1985 	req->msg.ifidx = 0;
1986 	req->msg.request_id = 0;
1987 	req->flow_ring_id = letoh16(flowid + 2);
1988 	req->reason = 0;
1989 
1990 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1991 	splx(s);
1992 }
1993 
1994 void
1995 bwfm_pci_stop(struct bwfm_softc *bwfm)
1996 {
1997 	struct bwfm_pci_softc *sc = (void *)bwfm;
1998 	struct bwfm_pci_msgring *ring;
1999 	int i;
2000 
2001 	for (i = 0; i < sc->sc_max_flowrings; i++) {
2002 		ring = &sc->sc_flowrings[i];
2003 		if (ring->status == RING_OPEN)
2004 			bwfm_pci_flowring_delete(sc, i);
2005 	}
2006 }
2007 
2008 int
2009 bwfm_pci_txcheck(struct bwfm_softc *bwfm)
2010 {
2011 	struct bwfm_pci_softc *sc = (void *)bwfm;
2012 	struct bwfm_pci_msgring *ring;
2013 	int i;
2014 
2015 	/* If we are transitioning, we cannot send. */
2016 	for (i = 0; i < sc->sc_max_flowrings; i++) {
2017 		ring = &sc->sc_flowrings[i];
2018 		if (ring->status == RING_OPENING)
2019 			return ENOBUFS;
2020 	}
2021 
2022 	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
2023 		sc->sc_tx_pkts_full = 1;
2024 		return ENOBUFS;
2025 	}
2026 
2027 	return 0;
2028 }
2029 
/*
 * Transmit one Ethernet frame over the msgbuf protocol: look up (or
 * trigger creation of) the flowring for this packet, reserve a TX_POST
 * descriptor, map the mbuf for DMA and commit the descriptor.
 * Returns 0 on success or if the packet was queued for a flowring that
 * is being created; ENOBUFS or the pktid mapping error otherwise.
 */
int
bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf *m)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	struct msgbuf_tx_msghdr *tx;
	uint32_t pktid;
	paddr_t paddr;
	int flowid, ret;

	flowid = bwfm_pci_flowring_lookup(sc, m);
	if (flowid < 0) {
		/*
		 * We cannot send the packet right now as there is
		 * no flowring yet.  The flowring will be created
		 * asynchronously.  While the ring is transitioning
		 * the TX check will tell the upper layers that we
		 * cannot send packets right now.  When the flowring
		 * is created the queue will be restarted and this
		 * mbuf will be transmitted.
		 */
		bwfm_pci_flowring_create(sc, m);
		return 0;
	}

	ring = &sc->sc_flowrings[flowid];
	if (ring->status == RING_OPENING ||
	    ring->status == RING_CLOSING) {
		printf("%s: tried to use a flow that was "
		    "transitioning in status %d\n",
		    DEVNAME(sc), ring->status);
		return ENOBUFS;
	}

	tx = bwfm_pci_ring_write_reserve(sc, ring);
	if (tx == NULL)
		return ENOBUFS;

	memset(tx, 0, sizeof(*tx));
	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
	tx->msg.ifidx = 0;
	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
	/* Encode the 802.11 priority (TID) into the descriptor flags. */
	tx->flags |= ieee80211_classify(&sc->sc_sc.sc_ic, m) <<
	    BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
	tx->seg_cnt = 1;
	/* The Ethernet header travels inline in the descriptor ... */
	memcpy(tx->txhdr, mtod(m, char *), ETHER_HDR_LEN);

	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, m, &pktid, &paddr);
	if (ret) {
		if (ret == ENOBUFS) {
			printf("%s: no pktid available for TX\n",
			    DEVNAME(sc));
			sc->sc_tx_pkts_full = 1;
		}
		/* Give back the descriptor we reserved above. */
		bwfm_pci_ring_write_cancel(sc, ring, 1);
		return ret;
	}
	/* ... so the DMA'd payload starts after the Ethernet header. */
	paddr += ETHER_HDR_LEN;

	/*
	 * NOTE(review): request_id is pktid + 1, presumably so that 0
	 * stays an invalid id -- confirm against the completion path.
	 * data_len uses m->m_len, which assumes the frame fits in a
	 * single mbuf -- TODO confirm callers guarantee this.
	 */
	tx->msg.request_id = htole32(pktid + 1);
	tx->data_len = htole16(m->m_len - ETHER_HDR_LEN);
	tx->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
	tx->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, ring);
	return 0;
}
2097 
/*
 * Post a host-to-device mailbox value and ring the system backplane
 * mailbox doorbell.  Returns EIO if the device has not consumed the
 * previous value within ~1 second, 0 otherwise.
 */
int
bwfm_pci_send_mb_data(struct bwfm_pci_softc *sc, uint32_t htod_mb_data)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	/* Wait for the device to clear the previous mailbox value. */
	for (i = 0; i < 100; i++) {
		reg = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_htod_mb_data_addr);
		if (reg == 0)
			break;
		delay(10 * 1000);	/* 10 ms per poll, 1 s total */
	}
	if (i == 100) {
		DPRINTF(("%s: MB transaction already pending\n", DEVNAME(sc)));
		return EIO;
	}

	/* Write the value, then notify the device via the SB mailbox. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_htod_mb_data_addr, htod_mb_data);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_REG_SBMBX, 1);

	/*
	 * NOTE(review): PCIe2 core revisions <= 13 get the doorbell rung a
	 * second time -- presumably a hardware quirk workaround; confirm
	 * against the vendor (brcmfmac) driver.
	 */
	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13)
		pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_REG_SBMBX, 1);

	return 0;
}
2128 
2129 void
2130 bwfm_pci_handle_mb_data(struct bwfm_pci_softc *sc)
2131 {
2132 	uint32_t reg;
2133 
2134 	reg = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
2135 	    sc->sc_dtoh_mb_data_addr);
2136 	if (reg == 0)
2137 		return;
2138 
2139 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
2140 	    sc->sc_dtoh_mb_data_addr, 0);
2141 
2142 	if (reg & BWFM_PCI_D2H_DEV_D3_ACK) {
2143 		sc->sc_mbdata_done = 1;
2144 		wakeup(&sc->sc_mbdata_done);
2145 	}
2146 
2147 	/* TODO: support more events */
2148 	if (reg & ~BWFM_PCI_D2H_DEV_D3_ACK)
2149 		printf("%s: handle MB data 0x%08x\n", DEVNAME(sc), reg);
2150 }
2151 
2152 #ifdef BWFM_DEBUG
2153 void
2154 bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
2155 {
2156 	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
2157 	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);
2158 
2159 	if (newidx != sc->sc_console_readidx)
2160 		DPRINTFN(3, ("BWFM CONSOLE: "));
2161 	while (newidx != sc->sc_console_readidx) {
2162 		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
2163 		    sc->sc_console_buf_addr + sc->sc_console_readidx);
2164 		sc->sc_console_readidx++;
2165 		if (sc->sc_console_readidx == sc->sc_console_buf_size)
2166 			sc->sc_console_readidx = 0;
2167 		if (ch == '\r')
2168 			continue;
2169 		DPRINTFN(3, ("%c", ch));
2170 	}
2171 }
2172 #endif
2173 
/*
 * PCI interrupt handler.  Acknowledges the interrupt, handles mailbox
 * events, drains the three device-to-host completion rings and passes
 * received packets to the network stack.  Returns 1 if the interrupt
 * was ours, 0 otherwise.
 */
int
bwfm_pci_intr(void *v)
{
	struct bwfm_pci_softc *sc = (void *)v;
	struct bwfm_softc *bwfm = (void *)sc;
	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint32_t status, mask;

	/* Ignore interrupts until the device is fully set up. */
	if (!sc->sc_initialized)
		return 0;

	status = bwfm_pci_intr_status(sc);
	/* FIXME: interrupt status seems to be zero? */
	/* Workaround: on the 4378 assume a D2H doorbell when status is 0. */
	if (status == 0 && bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
		status |= BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB;
	if (status == 0)
		return 0;

	/* Mask and acknowledge while we process the rings. */
	bwfm_pci_intr_disable(sc);
	bwfm_pci_intr_ack(sc, status);

	/* Function-level mailbox interrupts; not taken on the 4378. */
	if (bwfm->sc_chip.ch_chip != BRCM_CC_4378_CHIP_ID &&
	    (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1)))
		bwfm_pci_handle_mb_data(sc);

	/* The doorbell bit lives in a different register on the 4378. */
	mask = BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB;
	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
		mask = BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB;

	if (status & mask) {
		/* Drain RX, TX and control completion rings into ml. */
		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete, &ml);
		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete, &ml);
		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete, &ml);

		/* Hand packets to the stack; back off RX on livelock. */
		if (ifiq_input(&ifp->if_rcv, &ml))
			if_rxr_livelocked(&sc->sc_rxbuf_ring);
	}

#ifdef BWFM_DEBUG
	bwfm_pci_debug_console(sc);
#endif

	bwfm_pci_intr_enable(sc);
	return 1;
}
2221 
2222 void
2223 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
2224 {
2225 	struct bwfm_softc *bwfm = (void *)sc;
2226 
2227 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2228 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2229 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK,
2230 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2231 	else
2232 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2233 		    BWFM_PCI_PCIE2REG_MAILBOXMASK,
2234 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2235 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
2236 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2237 }
2238 
2239 void
2240 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
2241 {
2242 	struct bwfm_softc *bwfm = (void *)sc;
2243 
2244 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2245 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2246 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK, 0);
2247 	else
2248 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2249 		    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
2250 }
2251 
2252 uint32_t
2253 bwfm_pci_intr_status(struct bwfm_pci_softc *sc)
2254 {
2255 	struct bwfm_softc *bwfm = (void *)sc;
2256 
2257 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2258 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2259 		    BWFM_PCI_64_PCIE2REG_MAILBOXINT);
2260 	else
2261 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2262 		    BWFM_PCI_PCIE2REG_MAILBOXINT);
2263 }
2264 
2265 void
2266 bwfm_pci_intr_ack(struct bwfm_pci_softc *sc, uint32_t status)
2267 {
2268 	struct bwfm_softc *bwfm = (void *)sc;
2269 
2270 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2271 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2272 		    BWFM_PCI_64_PCIE2REG_MAILBOXINT, status);
2273 	else
2274 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2275 		    BWFM_PCI_PCIE2REG_MAILBOXINT, status);
2276 }
2277 
2278 uint32_t
2279 bwfm_pci_intmask(struct bwfm_pci_softc *sc)
2280 {
2281 	struct bwfm_softc *bwfm = (void *)sc;
2282 
2283 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2284 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2285 		    BWFM_PCI_64_PCIE2REG_INTMASK);
2286 	else
2287 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2288 		    BWFM_PCI_PCIE2REG_INTMASK);
2289 }
2290 
2291 void
2292 bwfm_pci_hostready(struct bwfm_pci_softc *sc)
2293 {
2294 	struct bwfm_softc *bwfm = (void *)sc;
2295 
2296 	if ((sc->sc_shared_flags & BWFM_SHARED_INFO_HOSTRDY_DB1) == 0)
2297 		return;
2298 
2299 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2300 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2301 		    BWFM_PCI_64_PCIE2REG_H2D_MAILBOX_1, 1);
2302 	else
2303 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2304 		    BWFM_PCI_PCIE2REG_H2D_MAILBOX_1, 1);
2305 }
2306 
2307 /* Msgbuf protocol implementation */
/*
 * Issue a firmware ioctl ("dcmd") over the msgbuf protocol and wait up
 * to one second for the response.  On entry *len is the request/response
 * buffer size; on success it is updated to the response length and the
 * response data is copied into buf (if non-NULL).  Returns 0 on success,
 * 1 on any failure (allocation, ring full, timeout, firmware error).
 */
int
bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
    int cmd, char *buf, size_t *len)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct msgbuf_ioctl_req_hdr *req;
	struct bwfm_pci_ioctl *ctl;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	size_t buflen;
	int s;

	/* Clamp the payload to the DMA ioctl buffer size. */
	buflen = min(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
	m = MCLGETL(NULL, M_DONTWAIT, buflen);
	if (m == NULL)
		return 1;
	m->m_len = m->m_pkthdr.len = buflen;

	if (buf)
		memcpy(mtod(m, char *), buf, buflen);
	else
		memset(mtod(m, char *), 0, buflen);

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		splx(s);
		m_freem(m);
		return 1;
	}

	/* Map the request mbuf for DMA and obtain a packet id. */
	if (bwfm_pci_pktid_new(sc, &sc->sc_ioctl_pkts, m, &pktid, &paddr)) {
		bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
		splx(s);
		m_freem(m);
		return 1;
	}

	/* Queue a waiter entry keyed by transaction id for rxioctl. */
	ctl = malloc(sizeof(*ctl), M_TEMP, M_WAITOK|M_ZERO);
	ctl->transid = sc->sc_ioctl_transid++;
	TAILQ_INSERT_TAIL(&sc->sc_ioctlq, ctl, next);

	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	req->msg.ifidx = 0;
	req->msg.flags = 0;
	req->msg.request_id = htole32(pktid);
	req->cmd = htole32(cmd);
	req->output_buf_len = htole16(*len);
	req->trans_id = htole16(ctl->transid);

	req->input_buf_len = htole16(m->m_len);
	req->req_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
	req->req_buf_addr.low_addr = htole32(paddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);

	/*
	 * Sleep until bwfm_pci_msgbuf_rxioctl() wakes us with the
	 * response, or for at most one second.  On timeout ctl->m stays
	 * NULL and we fail below; a late response then finds no waiter
	 * and is dropped by rxioctl.
	 */
	tsleep_nsec(ctl, PWAIT, "bwfm", SEC_TO_NSEC(1));
	TAILQ_REMOVE(&sc->sc_ioctlq, ctl, next);

	if (ctl->m == NULL) {
		free(ctl, M_TEMP, sizeof(*ctl));
		return 1;
	}

	/*
	 * NOTE(review): m (the request mbuf) is read here after the
	 * request completed -- assumes its pktid entry keeps it alive
	 * until the ioctl-complete path frees it; TODO confirm.
	 */
	*len = min(ctl->retlen, m->m_len);
	*len = min(*len, buflen);
	if (buf)
		m_copydata(ctl->m, 0, *len, buf);
	m_freem(ctl->m);

	/* Negative firmware status means the command failed. */
	if (ctl->status < 0) {
		free(ctl, M_TEMP, sizeof(*ctl));
		return 1;
	}

	free(ctl, M_TEMP, sizeof(*ctl));
	return 0;
}
2388 
/*
 * Set a firmware variable: a "set" is just a "query" whose returned
 * length we do not propagate back to the caller.
 */
int
bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
    int cmd, char *buf, size_t len)
{
	size_t resid = len;

	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &resid);
}
2395 
2396 void
2397 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *sc,
2398     struct msgbuf_ioctl_resp_hdr *resp)
2399 {
2400 	struct bwfm_pci_ioctl *ctl, *tmp;
2401 	struct mbuf *m;
2402 
2403 	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
2404 	    letoh32(resp->msg.request_id));
2405 
2406 	TAILQ_FOREACH_SAFE(ctl, &sc->sc_ioctlq, next, tmp) {
2407 		if (ctl->transid != letoh16(resp->trans_id))
2408 			continue;
2409 		ctl->m = m;
2410 		ctl->retlen = letoh16(resp->resp_len);
2411 		ctl->status = letoh16(resp->compl_hdr.status);
2412 		wakeup(ctl);
2413 		return;
2414 	}
2415 
2416 	m_freem(m);
2417 }
2418