/*	$NetBSD: if_bwfm_pci.c,v 1.10 2021/05/08 00:27:02 thorpej Exp $	*/
/*	$OpenBSD: if_bwfm_pci.c,v 1.18 2018/02/08 05:00:38 patrick Exp $	*/
/*
 * Copyright (c) 2010-2016 Broadcom Corporation
 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/workqueue.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/in.h>

#include <net80211/ieee80211_var.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/bwfmreg.h>
#include <dev/ic/bwfmvar.h>
#include <dev/pci/if_bwfm_pci.h>

#define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
#define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
#define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN

#define BWFM_NUM_TX_MSGRINGS			2
#define BWFM_NUM_RX_MSGRINGS			3

#define BWFM_NUM_TX_PKTIDS			2048
#define BWFM_NUM_RX_PKTIDS			1024

#define BWFM_NUM_TX_DESCS			1
#define BWFM_NUM_RX_DESCS			1

#ifdef BWFM_DEBUG
#define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
static int bwfm_debug = 2;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

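/*
 * Shorthand and OpenBSD compatibility glue: this driver is shared with
 * OpenBSD, so map its spellings onto the NetBSD equivalents.  Aliasing
 * letoh16/letoh32 to htole16/htole32 is safe because byte-order
 * conversion is its own inverse.
 */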
#define DEVNAME(sc)	device_xname((sc)->sc_sc.sc_dev)
#define letoh16		htole16
#define letoh32		htole32
#define nitems(x)	__arraycount(x)

enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};

struct bwfm_pci_msgring {
	uint32_t		 w_idx_addr;
	uint32_t		 r_idx_addr;
	uint32_t		 w_ptr;
	uint32_t		 r_ptr;
	int			 nitem;
	int			 itemsz;
	enum ring_status	 status;
	struct bwfm_pci_dmamem	*ring;
	struct mbuf		*m;

	int			 fifo;
	uint8_t			 mac[ETHER_ADDR_LEN];
};

struct bwfm_pci_buf {
	bus_dmamap_t	 bb_map;
	struct mbuf	*bb_m;
};

struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;
	uint32_t		 npkt;
	int			 last;
};

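/*
 * Minimal stand-in for OpenBSD's if_rxring(9) accounting.  Only the
 * total/in-use counters are tracked; the watermark logic of the real
 * API is not replicated here (see if_rxr_init() below, which ignores
 * the low watermark).
 */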
struct if_rxring {
	u_int	rxr_total;
	u_int	rxr_inuse;
};

struct bwfm_cmd_flowring_create {
	struct work		 wq_cookie;
	struct bwfm_pci_softc	*sc;
	struct mbuf		*m;
	int			 flowid;
	int			 prio;
};

struct bwfm_pci_softc {
	struct bwfm_softc	 sc_sc;
	pci_chipset_tag_t	 sc_pc;
	pcitag_t		 sc_tag;
	pcireg_t		 sc_id;
	void			*sc_ih;
	pci_intr_handle_t	*sc_pihp;

	bus_space_tag_t		 sc_reg_iot;
	bus_space_handle_t	 sc_reg_ioh;
	bus_size_t		 sc_reg_ios;

	bus_space_tag_t		 sc_tcm_iot;
	bus_space_handle_t	 sc_tcm_ioh;
	bus_size_t		 sc_tcm_ios;

	bus_dma_tag_t		 sc_dmat;

	uint32_t		 sc_shared_address;
	uint32_t		 sc_shared_flags;
	uint8_t			 sc_shared_version;

	uint8_t			 sc_dma_idx_sz;
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
	size_t			 sc_dma_idx_bufsz;

	uint16_t		 sc_max_rxbufpost;
	uint32_t		 sc_rx_dataoffset;
	uint32_t		 sc_htod_mb_data_addr;
	uint32_t		 sc_dtoh_mb_data_addr;
	uint32_t		 sc_ring_info_addr;

	uint32_t		 sc_console_base_addr;
	uint32_t		 sc_console_buf_addr;
	uint32_t		 sc_console_buf_size;
	uint32_t		 sc_console_readidx;

	struct pool		 sc_flowring_pool;
	struct workqueue	*flowring_wq;

	uint16_t		 sc_max_flowrings;
	uint16_t		 sc_max_submissionrings;
	uint16_t		 sc_max_completionrings;

	struct bwfm_pci_msgring	 sc_ctrl_submit;
	struct bwfm_pci_msgring	 sc_rxpost_submit;
	struct bwfm_pci_msgring	 sc_ctrl_complete;
	struct bwfm_pci_msgring	 sc_tx_complete;
	struct bwfm_pci_msgring	 sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;

	struct bwfm_pci_dmamem	*sc_scratch_buf;
	struct bwfm_pci_dmamem	*sc_ringupd_buf;

	struct bwfm_pci_dmamem	*sc_ioctl_buf;
	int			 sc_ioctl_reqid;
	uint32_t		 sc_ioctl_resp_pktid;
	uint32_t		 sc_ioctl_resp_ret_len;
	uint32_t		 sc_ioctl_resp_status;
	int			 sc_ioctl_poll;

	struct if_rxring	 sc_ioctl_ring;
	struct if_rxring	 sc_event_ring;
	struct if_rxring	 sc_rxbuf_ring;

	struct bwfm_pci_pkts	 sc_rx_pkts;
	struct bwfm_pci_pkts	 sc_tx_pkts;
	int			 sc_tx_pkts_full;
};

struct bwfm_pci_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	char *			bdm_kva;
};

#define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BWFM_PCI_DMA_DVA(_bdm)	(uint64_t)((_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BWFM_PCI_DMA_KVA(_bdm)	((_bdm)->bdm_kva)

static u_int	 if_rxr_get(struct if_rxring *rxr, unsigned int max);
static void	 if_rxr_put(struct if_rxring *rxr, unsigned int n);
static void	 if_rxr_init(struct if_rxring *rxr, unsigned int lwm, unsigned int hwm);

int		 bwfm_pci_match(device_t parent, cfdata_t match, void *aux);
void		 bwfm_pci_attachhook(device_t);
void		 bwfm_pci_attach(device_t, device_t, void *);
int		 bwfm_pci_detach(device_t, int);

int		 bwfm_pci_intr(void *);
void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
		    size_t);
void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int);

struct bwfm_pci_dmamem *
		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
		    bus_size_t);
void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *);
int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, struct mbuf **,
		    uint32_t *, paddr_t *);
struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, uint32_t);
void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
		    struct if_rxring *, uint32_t);
void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t);

void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int, int *);
void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int *);
void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);
void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);

void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *);

uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
		    uint32_t);
int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
void		 bwfm_pci_buscore_activate(struct bwfm_softc *, const uint32_t);

int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
		     struct mbuf *);
void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
		     struct mbuf *);
void		 bwfm_pci_flowring_create_cb(struct work *, void *);
void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);

void		 bwfm_pci_stop(struct bwfm_softc *);
int		 bwfm_pci_txcheck(struct bwfm_softc *);
int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf **);

#ifdef BWFM_DEBUG
void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
#endif

int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t *);
int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t);

static const struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};

static const struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_init = NULL,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
	.bs_rxctl = NULL,
};

static const struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
};

CFATTACH_DECL_NEW(bwfm_pci, sizeof(struct bwfm_pci_softc),
    bwfm_pci_match, bwfm_pci_attach, bwfm_pci_detach, NULL);

static const struct bwfm_firmware_selector bwfm_pci_fwtab[] = {
	BWFM_FW_ENTRY(BRCM_CC_43602_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43602-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_43465_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4350_CHIP_ID,
		      BWFM_FWSEL_REV_LE(7), "brcmfmac4350c2-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_4350_CHIP_ID,
		      BWFM_FWSEL_REV_GE(8), "brcmfmac4350-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_43525_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4365c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4356_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4356-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_43567_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_43569_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_43570_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4358_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4358-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4359_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4359-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4365_CHIP_ID,
		      BWFM_FWSEL_REV_LE(3), "brcmfmac4365b-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_4365_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4365c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4366_CHIP_ID,
		      BWFM_FWSEL_REV_LE(3), "brcmfmac4366b-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_4366_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_43664_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4371_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4371-pcie"),

	BWFM_FW_ENTRY_END
};

static const struct device_compatible_entry compat_data[] = {
	{ .id = PCI_ID_CODE(PCI_VENDOR_BROADCOM,
		PCI_PRODUCT_BROADCOM_BCM43602), },

	{ .id = PCI_ID_CODE(PCI_VENDOR_BROADCOM,
		PCI_PRODUCT_BROADCOM_BCM4350), },

	PCI_COMPAT_EOL
};

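/*
 * Local replacement for OpenBSD's MCLGETI(): allocate a packet header
 * mbuf with an external buffer large enough for `size' bytes.  The
 * softc and ifp arguments exist only to mirror the OpenBSD signature
 * and are unused here.
 */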
static struct mbuf *
MCLGETI(struct bwfm_pci_softc *sc __unused, int how,
    struct ifnet *ifp __unused, u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

int
bwfm_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return pci_compatible_match(pa, compat_data);
}

void
bwfm_pci_attach(device_t parent, device_t self, void *aux)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_sc.sc_dev = self;

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
	    NULL, &sc->sc_reg_ios)) {
		printf(": can't map bar0\n");
		return;
	}

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
	    NULL, &sc->sc_tcm_ios)) {
		printf(": can't map bar1\n");
		goto bar0;
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_id = pa->pa_id;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	/* Map and establish the interrupt. */
	if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0) != 0) {
		printf(": couldn't map interrupt\n");
		goto bar1;
	}
	intrstr = pci_intr_string(pa->pa_pc, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));

	sc->sc_ih = pci_intr_establish_xname(pa->pa_pc, sc->sc_pihp[0], IPL_NET,
	    bwfm_pci_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto bar1;
	}
	printf(": %s\n", intrstr);

	config_mountroot(self, bwfm_pci_attachhook);
	return;

bar1:
	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
bar0:
	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
}

void
bwfm_pci_attachhook(device_t self)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_pci_ringinfo ringinfo;
	struct bwfm_firmware_context fwctx;
	uint8_t *ucode;
	size_t ucsize;
	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint32_t idx_offset, reg;
	int i;

	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
		aprint_error_dev(bwfm->sc_dev, "cannot attach chip\n");
		return;
	}

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);

	bwfm_firmware_context_init(&fwctx,
	    bwfm->sc_chip.ch_chip, bwfm->sc_chip.ch_chiprev, NULL,
	    BWFM_FWREQ(BWFM_FILETYPE_UCODE));

	if (!bwfm_firmware_open(bwfm, bwfm_pci_fwtab, &fwctx)) {
		/* Error message already displayed. */
		goto err;
	}

	ucode = bwfm_firmware_data(&fwctx, BWFM_FILETYPE_UCODE, &ucsize);
	KASSERT(ucode != NULL);

	/* Retrieve RAM size from firmware. */
	if (ucsize >= BWFM_RAMSIZE + 8) {
		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
	}

	if (bwfm_pci_load_microcode(sc, ucode, ucsize) != 0) {
		aprint_error_dev(bwfm->sc_dev, "could not load microcode\n");
		goto err;
	}

	bwfm_firmware_close(&fwctx);

	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_INFO);
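	/*
	 * The shared-info layout version is carried in the low byte of
	 * the flags word; the narrowing assignment to a uint8_t below
	 * extracts exactly that byte.
	 */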
	sc->sc_shared_version = sc->sc_shared_flags;
	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
		aprint_error_dev(bwfm->sc_dev,
		    "PCIe version %d unsupported\n", sc->sc_shared_version);
		return;
	}

	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
			sc->sc_dma_idx_sz = sizeof(uint16_t);
		else
			sc->sc_dma_idx_sz = sizeof(uint32_t);
	}

	/* Maximum RX data buffers in the ring. */
	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
	if (sc->sc_max_rxbufpost == 0)
		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;

	/* Alternative offset of data in a packet */
	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);

	/* For Power Management */
	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);

	/* Ring information */
	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);

	/* Firmware's "dmesg" */
	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);

	/* Read ring information. */
	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));

	if (sc->sc_shared_version >= 6) {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
	} else {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_flowrings = sc->sc_max_submissionrings -
		    BWFM_NUM_TX_MSGRINGS;
		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
	}

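	/*
	 * Ring read/write indices either live in device TCM (no DMA
	 * index support) or in a single host-resident DMA buffer that
	 * the firmware updates directly.  In the latter case the
	 * buffer holds four consecutive arrays of sc_dma_idx_sz-sized
	 * entries:
	 *
	 *	[ h2d write | h2d read | d2h write | d2h read ]
	 *
	 * with sc_max_submissionrings entries in each h2d array and
	 * sc_max_completionrings entries in each d2h array; the
	 * per-ring w_idx_addr/r_idx_addr values are byte offsets into
	 * this buffer.
	 */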
	if (sc->sc_dma_idx_sz == 0) {
		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(uint32_t);
	} else {
		uint64_t address;

		/* Each TX/RX Ring has a Read and Write Ptr */
		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
		    sc->sc_dma_idx_bufsz, 8);
		if (sc->sc_dma_idx_buf == NULL) {
			/* XXX: Fallback to TCM? */
			aprint_error_dev(bwfm->sc_dev,
			    "cannot allocate idx buf\n");
			return;
		}

		idx_offset = sc->sc_dma_idx_sz;
		h2d_w_idx_ptr = 0;
		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
		ringinfo.h2d_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr_high =
		    htole32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
		    sc->sc_max_completionrings * idx_offset;
		address += sc->sc_max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr_high =
		    htole32(address >> 32);

		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
	}

	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* TX rxpost ring: Send clean data mbufs for RX */
	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* RX completion rings: recv our filled buffers back */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024, 16,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512, 32,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;

	/* Dynamic TX rings for actual data */
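	/*
	 * H2D ring ids 0 and 1 are the control and rxpost submission
	 * rings set up above, so flowring N uses ring id N + 2.  That
	 * is why the index offsets below are scaled by (i + 2), and
	 * why bwfm_pci_msg_rx() subtracts 2 from the flow_ring_id the
	 * firmware reports in completions.
	 */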
	sc->sc_flowrings = kmem_zalloc(sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring), KM_SLEEP);
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
	}

	pool_init(&sc->sc_flowring_pool, sizeof(struct bwfm_cmd_flowring_create),
	    0, 0, 0, "bwfmpl", NULL, IPL_NET);

	/* Scratch and ring update buffers for firmware */
	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);

	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);

	if ((sc->sc_ioctl_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_H2D_IOCTL_BUF_LEN, 8)) == NULL)
		goto cleanup;

	if (workqueue_create(&sc->flowring_wq, "bwfmflow",
	    bwfm_pci_flowring_create_cb, sc, PRI_SOFTNET, IPL_NET, 0))
		goto cleanup;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bwfm_pci_intr_enable(sc);

	/* Maps RX mbufs to a packet id and back. */
	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
	sc->sc_rx_pkts.pkts = kmem_zalloc(BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx_pkts.pkts[i].bb_map);

	/* Maps TX mbufs to a packet id and back. */
	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
	sc->sc_tx_pkts.pkts = kmem_zalloc(BWFM_NUM_TX_PKTIDS
	    * sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx_pkts.pkts[i].bb_map);

	/*
	 * For whatever reason (possibly a bug elsewhere in this
	 * driver), the firmware needs a generous number of RX buffers
	 * posted or it never sends any RX complete messages.  64
	 * buffers do not suffice, but 128 are enough.
	 */
	if_rxr_init(&sc->sc_rxbuf_ring, 128, sc->sc_max_rxbufpost);
	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
	if_rxr_init(&sc->sc_event_ring, 8, 8);
	bwfm_pci_fill_rx_rings(sc);

#ifdef BWFM_DEBUG
	sc->sc_console_readidx = 0;
	bwfm_pci_debug_console(sc);
#endif

	sc->sc_ioctl_poll = 1;
	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
	bwfm_attach(&sc->sc_sc);
	sc->sc_ioctl_poll = 0;
	return;

cleanup:
	if (sc->flowring_wq != NULL)
		workqueue_destroy(sc->flowring_wq);
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	}
	if (sc->sc_ioctl_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	if (sc->sc_ringupd_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	if (sc->sc_scratch_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	if (sc->sc_rx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	if (sc->sc_tx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	if (sc->sc_ctrl_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	if (sc->sc_rxpost_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	if (sc->sc_ctrl_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);

 err:
	bwfm_firmware_close(&fwctx);
}

int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode,
    size_t size)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t shared;
	int i;

	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	/* TODO: restore NVRAM */

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(const uint32_t *)ucode);

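	/*
	 * Poll for the firmware to replace the zero we wrote into the
	 * last word of RAM with the address of its shared info area;
	 * 40 rounds of 50 ms gives it up to two seconds to come up.
	 */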
	for (i = 0; i < 40; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared)
			break;
	}
	if (!shared) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}

int
bwfm_pci_detach(device_t self, int flags)
{
	struct bwfm_pci_softc *sc = device_private(self);

	bwfm_detach(&sc->sc_sc, flags);

	/* FIXME: free RX buffers */
	/* FIXME: free TX buffers */
	/* FIXME: free more memory */

	kmem_free(sc->sc_flowrings, sc->sc_max_flowrings
	    * sizeof(struct bwfm_pci_msgring));
	pool_destroy(&sc->sc_flowring_pool);

	workqueue_destroy(sc->flowring_wq);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 0;
}

/* DMA code */
struct bwfm_pci_dmamem *
bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size,
    bus_size_t align)
{
	struct bwfm_pci_dmamem *bdm;
	int nsegs;

	bdm = kmem_zalloc(sizeof(*bdm), KM_SLEEP);
	bdm->bdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
		goto bdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
	    (void **) &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(bdm->bdm_kva, size);

	return (bdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
bdmfree:
	kmem_free(bdm, sizeof(*bdm));

	return (NULL);
}

void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	kmem_free(bdm, sizeof(*bdm));
}

/*
 * We need a simple mapping from packet IDs to mbufs: when a transfer
 * completes, we are only told the ID, so we must be able to find the
 * mbuf for that ID again.  The code below simply scans for an empty
 * slot, starting just past the slot handed out last.
 */
int
bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL)
			return 0;
		idx++;
	}
	return ENOBUFS;
}

int
bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    struct mbuf **mp, uint32_t *pktid, paddr_t *paddr)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat,
			    pkts->pkts[idx].bb_map, *mp, BUS_DMA_NOWAIT) != 0) {
				/*
				 * Didn't fit.  Maybe it has too many
				 * segments.  If it has only one
				 * segment, fail; otherwise try to
				 * compact it into a single mbuf
				 * segment.
				 */
				if ((*mp)->m_next == NULL)
					return ENOBUFS;
				struct mbuf *m0 = MCLGETI(NULL, M_DONTWAIT,
				    NULL, MSGBUF_MAX_PKT_SIZE);
				if (m0 == NULL)
					return ENOBUFS;
				m_copydata(*mp, 0, (*mp)->m_pkthdr.len,
				    mtod(m0, void *));
				m0->m_pkthdr.len = m0->m_len =
				    (*mp)->m_pkthdr.len;
				m_freem(*mp);
				*mp = m0;
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    pkts->pkts[idx].bb_map, *mp,
				    BUS_DMA_NOWAIT) != 0)
					return EFBIG;
			}
			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
			    0, pkts->pkts[idx].bb_map->dm_mapsize,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			pkts->last = idx;
			pkts->pkts[idx].bb_m = *mp;
			*pktid = idx;
			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
			return 0;
		}
		idx++;
	}
	return ENOBUFS;
}

struct mbuf *
bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    uint32_t pktid)
{
	struct mbuf *m;

	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
	    pkts->pkts[pktid].bb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
	m = pkts->pkts[pktid].bb_m;
	pkts->pkts[pktid].bb_m = NULL;
	return m;
}

void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}

void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;
	uint64_t devaddr;

	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		devaddr = paddr;
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->host_buf_addr.high_addr = htole32(devaddr >> 32);
		req->host_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	if_rxr_put(rxring, slots);
	splx(s);
}

void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;
	uint64_t devaddr;

	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		devaddr = paddr;
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32(devaddr >> 32);
		req->data_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}

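/*
 * Set up a statically allocated msgbuf ring: fill in the next ring
 * descriptor in the TCM ring-memory area at *ring_mem, point it at a
 * freshly allocated host DMA ring of nitem items of itemsz bytes each,
 * and advance *ring_mem past the descriptor for the next caller.
 */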
int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}

int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}

/* Ring helpers */
void
bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_H2D_MAILBOX, 1);
}

void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}

static u_int
if_rxr_get(struct if_rxring *rxr, unsigned int max)
{
	u_int taken = MIN(max, (rxr->rxr_total - rxr->rxr_inuse));

	KASSERTMSG(rxr->rxr_inuse + taken <= rxr->rxr_total,
			"rxr->rxr_inuse: %d\n"
			"taken: %d\n"
			"rxr->rxr_total: %d\n",
			rxr->rxr_inuse, taken, rxr->rxr_total);
	rxr->rxr_inuse += taken;

	return taken;
}

static void
if_rxr_put(struct if_rxring *rxr, unsigned int n)
{
	KASSERTMSG(rxr->rxr_inuse >= n,
			"rxr->rxr_inuse: %d\n"
			"n: %d\n"
			"rxr->rxr_total: %d\n",
			rxr->rxr_inuse, n, rxr->rxr_total);

	rxr->rxr_inuse -= n;
}

static void
if_rxr_init(struct if_rxring *rxr, unsigned int lwm __unused, unsigned int hwm)
{
	(void) lwm;

	rxr->rxr_total = hwm;
	rxr->rxr_inuse = 0;
}

void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}

void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

/*
 * Retrieve a free descriptor to put new stuff in, but don't commit
 * to it yet so we can roll back later if any error occurs.
 */
void *
bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	ring->w_ptr += 1;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}
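
/*
 * As above, but reserve up to `count' contiguous descriptors.  Note
 * the "available - 1": one slot is left unused, presumably so that a
 * full ring never becomes indistinguishable from an empty one
 * (w_ptr == r_ptr).  A reservation is also clipped so it never wraps
 * past the end of the ring.
 */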
void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	*avail = uimin(count, available - 1);
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

/*
 * Read number of descriptors available (submitted by the firmware)
 * and retrieve pointer to first descriptor.
 */
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	bwfm_pci_ring_update_wptr(sc, ring);

	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

	if (*avail == 0)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}

/*
 * Let firmware know we read N descriptors.
 */
void
bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	ring->r_ptr += nitem;
	if (ring->r_ptr == ring->nitem)
		ring->r_ptr = 0;
	bwfm_pci_ring_write_rptr(sc, ring);
}

/*
 * Let firmware know that we submitted some descriptors: flush the
 * ring contents, then publish the new write pointer, then ring the
 * doorbell, in that order, so the device never sees the pointer
 * before the data is visible.
 */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}

/*
 * Roll back N descriptors in case we don't actually want
 * to commit to them.
 */
void
bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	if (ring->w_ptr == 0)
		ring->w_ptr = ring->nitem - nitem;
	else
		ring->w_ptr -= nitem;
}

/*
 * For each written descriptor on the ring, pass the descriptor to
 * a message handler and let the firmware know we handled it.  The
 * read pointer is returned in batches of at most 48 descriptors so
 * the firmware can start reusing slots while we keep processing.
 */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring)
{
	char *buf;
	int avail, processed;

again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

	processed = 0;
	while (avail) {
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset);
		buf += ring->itemsz;
		processed++;
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	if (ring->r_ptr == 0)
		goto again;
}

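/*
 * Dispatch one D2H message by its msgbuf type: flowring create/delete
 * completions, ioctl acknowledgements and completions, firmware
 * events, TX status and RX completions.  Completion messages carry
 * the request/packet id that lets us recover the associated mbuf via
 * bwfm_pci_pktid_free().
 */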
void
bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf)
{
	struct ifnet *ifp = sc->sc_sc.sc_ic.ic_ifp;
	struct msgbuf_ioctl_resp_hdr *resp;
	struct msgbuf_tx_status *tx;
	struct msgbuf_rx_complete *rx;
	struct msgbuf_rx_event *event;
	struct msgbuf_common_hdr *msg;
	struct msgbuf_flowring_create_resp *fcr;
	struct msgbuf_flowring_delete_resp *fdr;
	struct bwfm_pci_msgring *ring;
	struct mbuf *m;
	int flowid;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype)
	{
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		fcr = (struct msgbuf_flowring_create_resp *)buf;
		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_OPENING)
			break;
		if (fcr->compl_hdr.status) {
			printf("%s: failed to open flowring %d\n",
			    DEVNAME(sc), flowid);
			ring->status = RING_CLOSED;
			if (ring->m) {
				m_freem(ring->m);
				ring->m = NULL;
			}
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
			break;
		}
		ring->status = RING_OPEN;
		if (ring->m != NULL) {
			m = ring->m;
			ring->m = NULL;
			if (bwfm_pci_txdata(&sc->sc_sc, &m))
				m_freem(m);
		}
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_start(ifp);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		fdr = (struct msgbuf_flowring_delete_resp *)buf;
		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_CLOSING)
			break;
		if (fdr->compl_hdr.status) {
			printf("%s: failed to delete flowring %d\n",
			    DEVNAME(sc), flowid);
			break;
		}
		bwfm_pci_dmamem_free(sc, ring->ring);
		ring->status = RING_CLOSED;
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
		sc->sc_ioctl_resp_pktid = letoh32(resp->msg.request_id);
		sc->sc_ioctl_resp_ret_len = letoh16(resp->resp_len);
		sc->sc_ioctl_resp_status = letoh16(resp->compl_hdr.status);
		if_rxr_put(&sc->sc_ioctl_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		wakeup(&sc->sc_ioctl_buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		event = (struct msgbuf_rx_event *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(event->msg.request_id));
		if (m == NULL)
			break;
		m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_event_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		tx = (struct msgbuf_tx_status *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
		    letoh32(tx->msg.request_id));
		if (m == NULL)
			break;
		m_freem(m);
		if (sc->sc_tx_pkts_full) {
			sc->sc_tx_pkts_full = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
		}
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		rx = (struct msgbuf_rx_complete *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(rx->msg.request_id));
		if (m == NULL)
			break;
		if (letoh16(rx->data_offset))
			m_adj(m, letoh16(rx->data_offset));
		else if (sc->sc_rx_dataoffset)
			m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_rxbuf_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	default:
		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
		break;
	}
}

/* Bus core helpers */
void
bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;

	core = bwfm_chip_get_core(bwfm, id);
	if (core == NULL) {
		printf("%s: could not find core to select\n", DEVNAME(sc));
		return;
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW, core->co_base);
	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    BWFM_PCI_BAR0_WINDOW, core->co_base);
}

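/*
 * BAR0 is a sliding window into the chip's backplane address space:
 * writing a window-aligned backplane address to BWFM_PCI_BAR0_WINDOW
 * makes that region appear at the start of BAR0.  A register address
 * therefore splits into
 *
 *	page   = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
 *	offset = reg &  (BWFM_PCI_BAR0_REG_SIZE - 1);
 *
 * as done by the two accessors below.
 */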
uint32_t
bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
}

void
bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
}

int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}

int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		uint16_t cfg_offset[] = {
		    BWFM_PCI_CFGREG_STATUS_CMD,
		    BWFM_PCI_CFGREG_PM_CSR,
		    BWFM_PCI_CFGREG_MSI_CAP,
		    BWFM_PCI_CFGREG_MSI_ADDR_L,
		    BWFM_PCI_CFGREG_MSI_ADDR_H,
		    BWFM_PCI_CFGREG_MSI_DATA,
		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
		    BWFM_PCI_CFGREG_RBAR_CTRL,
		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}

	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT);
	if (reg != 0xffffffff)
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_PCIE2REG_MAILBOXINT, reg);

	return 0;
}

void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, const uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}

static int bwfm_pci_prio2fifo[8] = {
	1, /* best effort */
	0, /* IPTOS_PREC_IMMEDIATE */
	0, /* IPTOS_PREC_PRIORITY */
	1, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};

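/*
 * Find the flowring an outgoing frame belongs to.  The search key is
 * the WME access category (mapped to a firmware fifo) plus, in HostAP
 * mode, the destination MAC; starting from a simple hash of that key,
 * the open flowrings are linearly probed.  Returns the flowring index,
 * or -1 if no matching open ring exists.
 */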
1661 int
1662 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
1663 {
1664 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1665 	uint8_t *da = mtod(m, uint8_t *);
1666 	struct ether_header *eh;
1667 	int flowid, prio, fifo;
1668 	int i, found, ac;
1669 
1670 	/* No QoS for EAPOL frames. */
1671 	eh = mtod(m, struct ether_header *);
1672 	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1673 	    M_WME_GETAC(m) : WME_AC_BE;
1674 
1675 	prio = ac;
1676 	fifo = bwfm_pci_prio2fifo[prio];
1677 
1678 	switch (ic->ic_opmode)
1679 	{
1680 	case IEEE80211_M_STA:
1681 		flowid = fifo;
1682 		break;
1683 #ifndef IEEE80211_STA_ONLY
1684 	case IEEE80211_M_HOSTAP:
1685 		if (ETHER_IS_MULTICAST(da))
1686 			da = __UNCONST(etherbroadcastaddr);
1687 		flowid = da[5] * 2 + fifo;
1688 		break;
1689 #endif
1690 	default:
		printf("%s: operating mode not supported\n", DEVNAME(sc));
		return -1;
1693 	}
1694 
1695 	found = 0;
1696 	flowid = flowid % sc->sc_max_flowrings;
1697 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1698 		if (ic->ic_opmode == IEEE80211_M_STA &&
1699 		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1700 		    sc->sc_flowrings[flowid].fifo == fifo) {
1701 			found = 1;
1702 			break;
1703 		}
1704 #ifndef IEEE80211_STA_ONLY
1705 		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
1706 		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1707 		    sc->sc_flowrings[flowid].fifo == fifo &&
1708 		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
1709 			found = 1;
1710 			break;
1711 		}
1712 #endif
1713 		flowid = (flowid + 1) % sc->sc_max_flowrings;
1714 	}
1715 
1716 	if (found)
1717 		return flowid;
1718 
1719 	return -1;
1720 }
1721 
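/*
 * Request creation of a flowring for this frame.  The first closed
 * slot is claimed and marked RING_OPENING so that concurrent lookups
 * skip it, and the actual setup is deferred to the workqueue, which
 * may sleep to allocate the ring's DMA memory.
 */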
1722 void
1723 bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
1724 {
1725 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create *cmd;
1727 	uint8_t *da = mtod(m, uint8_t *);
1728 	struct ether_header *eh;
1729 	struct bwfm_pci_msgring *ring;
1730 	int flowid, prio, fifo;
1731 	int i, found, ac;
1732 
1733 	cmd = pool_get(&sc->sc_flowring_pool, PR_NOWAIT);
1734 	if (__predict_false(cmd == NULL))
1735 		return;
1736 
1737 	/* No QoS for EAPOL frames. */
1738 	eh = mtod(m, struct ether_header *);
1739 	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1740 	    M_WME_GETAC(m) : WME_AC_BE;
1741 
1742 	prio = ac;
1743 	fifo = bwfm_pci_prio2fifo[prio];
1744 
	switch (ic->ic_opmode) {
1747 	case IEEE80211_M_STA:
1748 		flowid = fifo;
1749 		break;
1750 #ifndef IEEE80211_STA_ONLY
1751 	case IEEE80211_M_HOSTAP:
1752 		if (ETHER_IS_MULTICAST(da))
1753 			da = __UNCONST(etherbroadcastaddr);
1754 		flowid = da[5] * 2 + fifo;
1755 		break;
1756 #endif
1757 	default:
		printf("%s: operating mode not supported\n", DEVNAME(sc));
		m_freem(m);
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
1760 	}
1761 
1762 	found = 0;
1763 	flowid = flowid % sc->sc_max_flowrings;
1764 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1765 		ring = &sc->sc_flowrings[flowid];
1766 		if (ring->status == RING_CLOSED) {
1767 			ring->status = RING_OPENING;
1768 			found = 1;
1769 			break;
1770 		}
1771 		flowid = (flowid + 1) % sc->sc_max_flowrings;
1772 	}
1773 
	/*
	 * No flowring slot is free.  We cannot recover from this yet;
	 * only a stop/init cycle of the interface frees up flowrings.
	 */
1778 	if (!found) {
1779 		printf("%s: no flowring available\n", DEVNAME(sc));
		m_freem(m);
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
1781 	}
1782 
1783 	cmd->sc = sc;
1784 	cmd->m = m;
1785 	cmd->prio = prio;
1786 	cmd->flowid = flowid;
1787 	workqueue_enqueue(sc->flowring_wq, &cmd->wq_cookie, NULL);
1788 }
1789 
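/*
 * Workqueue callback: allocate the ring's DMA memory (512 entries of
 * 48 bytes, matching the request below) and send a
 * MSGBUF_TYPE_FLOW_RING_CREATE request on the control submit ring.
 * The firmware's response, handled on the control complete ring, is
 * what finally moves the ring from RING_OPENING to RING_OPEN.
 */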
1790 void
bwfm_pci_flowring_create_cb(struct work *wk, void *arg)
{
	struct bwfm_cmd_flowring_create *cmd =
	    container_of(wk, struct bwfm_cmd_flowring_create, wq_cookie);
	struct bwfm_pci_softc *sc = cmd->sc;
1795 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1796 	struct msgbuf_tx_flowring_create_req *req;
1797 	struct bwfm_pci_msgring *ring;
1798 	uint8_t *da, *sa;
1799 
1800 	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
1801 	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;
1802 
1803 	ring = &sc->sc_flowrings[cmd->flowid];
1804 	if (ring->status != RING_OPENING) {
1805 		printf("%s: flowring not opening\n", DEVNAME(sc));
		m_freem(cmd->m);
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
1807 	}
1808 
1809 	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
1810 		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		m_freem(cmd->m);
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
1812 	}
1813 
1814 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1815 	if (req == NULL) {
1816 		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		m_freem(cmd->m);
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
1818 	}
1819 
1820 	ring->status = RING_OPENING;
1821 	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
1822 	ring->m = cmd->m;
1823 	memcpy(ring->mac, da, ETHER_ADDR_LEN);
1824 #ifndef IEEE80211_STA_ONLY
1825 	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
1826 		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
1827 #endif
1828 
1829 	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
1830 	req->msg.ifidx = 0;
1831 	req->msg.request_id = 0;
1832 	req->tid = bwfm_pci_prio2fifo[cmd->prio];
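	/*
	 * Flowring IDs are offset by 2: IDs 0 and 1 belong to the two
	 * host-to-device common rings (control and RX post submit).
	 */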
1833 	req->flow_ring_id = letoh16(cmd->flowid + 2);
1834 	memcpy(req->da, da, ETHER_ADDR_LEN);
1835 	memcpy(req->sa, sa, ETHER_ADDR_LEN);
1836 	req->flow_ring_addr.high_addr =
1837 	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
1838 	req->flow_ring_addr.low_addr =
1839 	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
1840 	req->max_items = letoh16(512);
1841 	req->len_item = letoh16(48);
1842 
1843 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1844 	pool_put(&sc->sc_flowring_pool, cmd);
1845 }
1846 
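/*
 * Ask the firmware to tear down a flowring.  The ring is only marked
 * RING_CLOSING here; the delete response on the control complete
 * ring completes the transition.
 */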
1847 void
1848 bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
1849 {
1850 	struct msgbuf_tx_flowring_delete_req *req;
1851 	struct bwfm_pci_msgring *ring;
1852 
1853 	ring = &sc->sc_flowrings[flowid];
1854 	if (ring->status != RING_OPEN) {
1855 		printf("%s: flowring not open\n", DEVNAME(sc));
1856 		return;
1857 	}
1858 
1859 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1860 	if (req == NULL) {
1861 		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1862 		return;
1863 	}
1864 
1865 	ring->status = RING_CLOSING;
1866 
1867 	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1868 	req->msg.ifidx = 0;
1869 	req->msg.request_id = 0;
1870 	req->flow_ring_id = letoh16(flowid + 2);
1871 	req->reason = 0;
1872 
1873 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1874 }
1875 
1876 void
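/* On stop, request deletion of every flowring that is still open. */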
1877 bwfm_pci_stop(struct bwfm_softc *bwfm)
1878 {
1879 	struct bwfm_pci_softc *sc = (void *)bwfm;
1880 	struct bwfm_pci_msgring *ring;
1881 	int i;
1882 
1883 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1884 		ring = &sc->sc_flowrings[i];
1885 		if (ring->status == RING_OPEN)
1886 			bwfm_pci_flowring_delete(sc, i);
1887 	}
1888 }
1889 
1890 int
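/*
 * Called by the common bwfm code before queueing a frame.  A
 * non-zero return applies backpressure, either while any flowring is
 * still transitioning or when bwfm_pci_pktid_avail() reports the TX
 * packet ID space as exhausted.
 */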
1891 bwfm_pci_txcheck(struct bwfm_softc *bwfm)
1892 {
1893 	struct bwfm_pci_softc *sc = (void *)bwfm;
1894 	struct bwfm_pci_msgring *ring;
1895 	int i;
1896 
	/* If any flowring is still transitioning, we cannot send. */
1898 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1899 		ring = &sc->sc_flowrings[i];
1900 		if (ring->status == RING_OPENING)
1901 			return ENOBUFS;
1902 	}
1903 
1904 	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
1905 		sc->sc_tx_pkts_full = 1;
1906 		return ENOBUFS;
1907 	}
1908 
1909 	return 0;
1910 }
1911 
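/*
 * Queue a frame on its flowring as a MSGBUF_TYPE_TX_POST message.
 * Only the message header is copied into the ring; the payload is
 * handed to the device by DMA address.
 */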
1912 int
1913 bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf **mp)
1914 {
1915 	struct bwfm_pci_softc *sc = (void *)bwfm;
1916 	struct bwfm_pci_msgring *ring;
1917 	struct msgbuf_tx_msghdr *tx;
1918 	uint32_t pktid;
1919 	paddr_t paddr;
1920 	uint64_t devaddr;
1921 	struct ether_header *eh;
1922 	int flowid, ret, ac;
1923 
1924 	flowid = bwfm_pci_flowring_lookup(sc, *mp);
1925 	if (flowid < 0) {
1926 		/*
1927 		 * We cannot send the packet right now as there is
1928 		 * no flowring yet.  The flowring will be created
1929 		 * asynchronously.  While the ring is transitioning
1930 		 * the TX check will tell the upper layers that we
1931 		 * cannot send packets right now.  When the flowring
1932 		 * is created the queue will be restarted and this
1933 		 * mbuf will be transmitted.
1934 		 */
1935 		bwfm_pci_flowring_create(sc, *mp);
1936 		return 0;
1937 	}
1938 
1939 	ring = &sc->sc_flowrings[flowid];
1940 	if (ring->status == RING_OPENING ||
1941 	    ring->status == RING_CLOSING) {
1942 		printf("%s: tried to use a flow that was "
1943 		    "transitioning in status %d\n",
1944 		    DEVNAME(sc), ring->status);
1945 		return ENOBUFS;
1946 	}
1947 
1948 	tx = bwfm_pci_ring_write_reserve(sc, ring);
1949 	if (tx == NULL)
1950 		return ENOBUFS;
1951 
1952 	/* No QoS for EAPOL frames. */
1953 	eh = mtod(*mp, struct ether_header *);
1954 	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1955 	    M_WME_GETAC(*mp) : WME_AC_BE;
1956 
1957 	memset(tx, 0, sizeof(*tx));
1958 	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
1959 	tx->msg.ifidx = 0;
1960 	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
1961 	tx->flags |= ac << BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
1962 	tx->seg_cnt = 1;
1963 	memcpy(tx->txhdr, mtod(*mp, char *), ETHER_HDR_LEN);
1964 
1965 	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, mp, &pktid, &paddr);
1966 	if (ret) {
1967 		if (ret == ENOBUFS) {
1968 			printf("%s: no pktid available for TX\n",
1969 			    DEVNAME(sc));
1970 			sc->sc_tx_pkts_full = 1;
1971 		}
1972 		bwfm_pci_ring_write_cancel(sc, ring, 1);
1973 		return ret;
1974 	}
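	/*
	 * The Ethernet header travels inside the message (txhdr above),
	 * so point the device past it at the payload only.  This
	 * assumes the frame sits in a single contiguous mbuf: seg_cnt
	 * is 1 and data_len below is taken from m_len.
	 */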
1975 	devaddr = paddr + ETHER_HDR_LEN;
1976 
1977 	tx->msg.request_id = htole32(pktid);
1978 	tx->data_len = htole16((*mp)->m_len - ETHER_HDR_LEN);
1979 	tx->data_buf_addr.high_addr = htole32(devaddr >> 32);
1980 	tx->data_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
1981 
1982 	bwfm_pci_ring_write_commit(sc, ring);
1983 	return 0;
1984 }
1985 
1986 #ifdef BWFM_DEBUG
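/*
 * Drain the firmware console: the device keeps a ring buffer in TCM,
 * and everything between our saved read index and the write index
 * published by the firmware is printed.
 */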
1987 void
1988 bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
1989 {
1990 	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1991 	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);
1992 
1993 	if (newidx != sc->sc_console_readidx)
1994 		DPRINTFN(3, ("BWFM CONSOLE: "));
1995 	while (newidx != sc->sc_console_readidx) {
1996 		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1997 		    sc->sc_console_buf_addr + sc->sc_console_readidx);
1998 		sc->sc_console_readidx++;
1999 		if (sc->sc_console_readidx == sc->sc_console_buf_size)
2000 			sc->sc_console_readidx = 0;
2001 		if (ch == '\r')
2002 			continue;
2003 		DPRINTFN(3, ("%c", ch));
2004 	}
2005 }
2006 #endif
2007 
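/*
 * Interrupt handler: read and acknowledge the mailbox interrupt
 * status, keep further interrupts masked while the completion rings
 * are drained, then unmask again.
 */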
2008 int
2009 bwfm_pci_intr(void *v)
2010 {
2011 	struct bwfm_pci_softc *sc = (void *)v;
2012 	uint32_t status;
2013 
2014 	if ((status = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2015 	    BWFM_PCI_PCIE2REG_MAILBOXINT)) == 0)
2016 		return 0;
2017 
2018 	bwfm_pci_intr_disable(sc);
2019 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2020 	    BWFM_PCI_PCIE2REG_MAILBOXINT, status);
2021 
2022 	if (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2023 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1))
2024 		printf("%s: handle MB data\n", __func__);
2025 
2026 	if (status & BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB) {
2027 		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete);
2028 		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete);
2029 		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete);
2030 	}
2031 
2032 #ifdef BWFM_DEBUG
2033 	bwfm_pci_debug_console(sc);
2034 #endif
2035 
2036 	bwfm_pci_intr_enable(sc);
2037 	return 1;
2038 }
2039 
2040 void
2041 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
2042 {
2043 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2044 	    BWFM_PCI_PCIE2REG_MAILBOXMASK,
2045 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2046 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
2047 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2048 }
2049 
2050 void
2051 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
2052 {
2053 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2054 	    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
2055 }
2056 
2057 /* Msgbuf protocol implementation */
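/*
 * Firmware ioctls ride on the control rings: the request header goes
 * out on the control submit ring, the payload sits in a preallocated
 * DMA buffer (capped at BWFM_DMA_H2D_IOCTL_BUF_LEN), and we sleep
 * for up to a second until the ioctl completion arrives carrying the
 * packet ID of the response buffer.
 */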
2058 int
2059 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
2060     int cmd, char *buf, size_t *len)
2061 {
2062 	struct bwfm_pci_softc *sc = (void *)bwfm;
2063 	struct msgbuf_ioctl_req_hdr *req;
2064 	struct mbuf *m;
2065 	size_t buflen;
2066 	int s;
2067 
2068 	s = splnet();
2069 	sc->sc_ioctl_resp_pktid = -1;
2070 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
2071 	if (req == NULL) {
2072 		printf("%s: cannot reserve for write\n", DEVNAME(sc));
2073 		splx(s);
2074 		return 1;
2075 	}
2076 	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
2077 	req->msg.ifidx = 0;
2078 	req->msg.flags = 0;
2079 	req->msg.request_id = htole32(MSGBUF_IOCTL_REQ_PKTID);
2080 	req->cmd = htole32(cmd);
2081 	req->output_buf_len = htole16(*len);
2082 	req->trans_id = htole16(sc->sc_ioctl_reqid++);
2083 
2084 	buflen = uimin(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
2085 	req->input_buf_len = htole16(buflen);
2086 	req->req_buf_addr.high_addr =
2087 	    htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) >> 32);
2088 	req->req_buf_addr.low_addr =
2089 	    htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) & 0xffffffff);
2090 	if (buf)
2091 		memcpy(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), buf, buflen);
2092 	else
2093 		memset(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), 0, buflen);
2094 
2095 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
2096 	splx(s);
2097 
2098 	if (tsleep(&sc->sc_ioctl_buf, PCATCH, "bwfm", hz)) {
2099 		printf("%s: timeout waiting for ioctl response\n",
2100 		    DEVNAME(sc));
2101 		return 1;
2102 	}
2103 
	s = splnet();
	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts, sc->sc_ioctl_resp_pktid);
	if (m == NULL) {
		splx(s);
		return 1;
	}
2107 
2108 	*len = uimin(buflen, sc->sc_ioctl_resp_ret_len);
2109 	if (buf)
2110 		memcpy(buf, mtod(m, char *), *len);
2111 	m_freem(m);
2112 	splx(s);
2113 
2114 	return 0;
2115 }
2116 
2117 int
2118 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
2119     int cmd, char *buf, size_t len)
2120 {
2121 	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
2122 }
2123