xref: /netbsd-src/sys/dev/pci/if_bwfm_pci.c (revision 481d3881954fd794ca5f2d880b68c53a5db8620e)
1 /*	$NetBSD: if_bwfm_pci.c,v 1.14 2024/07/05 04:31:51 rin Exp $	*/
2 /*	$OpenBSD: if_bwfm_pci.c,v 1.18 2018/02/08 05:00:38 patrick Exp $	*/
3 /*
4  * Copyright (c) 2010-2016 Broadcom Corporation
5  * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
6  *
7  * Permission to use, copy, modify, and/or distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <sys/cdefs.h>
21 __KERNEL_RCSID(0, "$NetBSD: if_bwfm_pci.c,v 1.14 2024/07/05 04:31:51 rin Exp $");
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/buf.h>
26 #include <sys/kernel.h>
27 #include <sys/kmem.h>
28 #include <sys/device.h>
29 #include <sys/pool.h>
30 #include <sys/workqueue.h>
31 #include <sys/socket.h>
32 
33 #include <net/bpf.h>
34 #include <net/if.h>
35 #include <net/if_dl.h>
36 #include <net/if_ether.h>
37 #include <net/if_media.h>
38 
39 #include <netinet/in.h>
40 
41 #include <net80211/ieee80211_var.h>
42 
43 #include <dev/pci/pcireg.h>
44 #include <dev/pci/pcivar.h>
45 #include <dev/pci/pcidevs.h>
46 
47 #include <dev/ic/bwfmreg.h>
48 #include <dev/ic/bwfmvar.h>
49 #include <dev/pci/if_bwfm_pci.h>
50 
51 #define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
52 #define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
53 #define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN
54 
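/*
 * Fixed message rings: two host-to-device submission rings (control
 * and RX-post) and three device-to-host completion rings (control,
 * TX and RX), set up in bwfm_pci_attachhook().
 */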
55 #define BWFM_NUM_TX_MSGRINGS			2
56 #define BWFM_NUM_RX_MSGRINGS			3
57 
58 #define BWFM_NUM_TX_PKTIDS			2048
59 #define BWFM_NUM_RX_PKTIDS			1024
60 
61 #define BWFM_NUM_TX_DESCS			1
62 #define BWFM_NUM_RX_DESCS			1
63 
64 #ifdef BWFM_DEBUG
65 #define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
66 #define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
67 static int bwfm_debug = 2;
68 #else
69 #define DPRINTF(x)	do { ; } while (0)
70 #define DPRINTFN(n, x)	do { ; } while (0)
71 #endif
72 
73 #define DEVNAME(sc)	device_xname((sc)->sc_sc.sc_dev)
74 #define letoh16		htole16
75 #define letoh32		htole32
76 #define nitems(x)	__arraycount(x)
77 
78 enum ring_status {
79 	RING_CLOSED,
80 	RING_CLOSING,
81 	RING_OPEN,
82 	RING_OPENING,
83 };
84 
85 struct bwfm_pci_msgring {
86 	uint32_t		 w_idx_addr;
87 	uint32_t		 r_idx_addr;
88 	uint32_t		 w_ptr;
89 	uint32_t		 r_ptr;
90 	int			 nitem;
91 	int			 itemsz;
92 	enum ring_status	 status;
93 	struct bwfm_pci_dmamem	*ring;
94 	struct mbuf		*m;
95 
96 	int			 fifo;
97 	uint8_t			 mac[ETHER_ADDR_LEN];
98 };
99 
100 struct bwfm_pci_buf {
101 	bus_dmamap_t	 bb_map;
102 	struct mbuf	*bb_m;
103 };
104 
105 struct bwfm_pci_pkts {
106 	struct bwfm_pci_buf	*pkts;
107 	uint32_t		 npkt;
108 	int			 last;
109 };
110 
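/*
 * Minimal local replacement for OpenBSD's if_rxring: it only tracks
 * how many RX slots are in use out of a configured total; the
 * low-watermark logic of the original API is not implemented (see
 * if_rxr_get/if_rxr_put/if_rxr_init below).
 */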
111 struct if_rxring {
112 	u_int	rxr_total;
113 	u_int	rxr_inuse;
114 };
115 
116 struct bwfm_cmd_flowring_create {
117 	struct work		 wq_cookie;
118 	struct bwfm_pci_softc	*sc;
119 	struct mbuf		*m;
120 	int			 flowid;
121 	int			 prio;
122 };
123 
124 struct bwfm_pci_softc {
125 	struct bwfm_softc	 sc_sc;
126 	pci_chipset_tag_t	 sc_pc;
127 	pcitag_t		 sc_tag;
128 	pcireg_t		 sc_id;
129 	void			*sc_ih;
130 	pci_intr_handle_t	*sc_pihp;
131 
132 	bus_space_tag_t		 sc_reg_iot;
133 	bus_space_handle_t	 sc_reg_ioh;
134 	bus_size_t		 sc_reg_ios;
135 
136 	bus_space_tag_t		 sc_tcm_iot;
137 	bus_space_handle_t	 sc_tcm_ioh;
138 	bus_size_t		 sc_tcm_ios;
139 
140 	bus_dma_tag_t		 sc_dmat;
141 
142 	uint32_t		 sc_shared_address;
143 	uint32_t		 sc_shared_flags;
144 	uint8_t			 sc_shared_version;
145 
146 	uint8_t			 sc_dma_idx_sz;
147 	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
148 	size_t			 sc_dma_idx_bufsz;
149 
150 	uint16_t		 sc_max_rxbufpost;
151 	uint32_t		 sc_rx_dataoffset;
152 	uint32_t		 sc_htod_mb_data_addr;
153 	uint32_t		 sc_dtoh_mb_data_addr;
154 	uint32_t		 sc_ring_info_addr;
155 
156 	uint32_t		 sc_console_base_addr;
157 	uint32_t		 sc_console_buf_addr;
158 	uint32_t		 sc_console_buf_size;
159 	uint32_t		 sc_console_readidx;
160 
161 	struct pool		 sc_flowring_pool;
162 	struct workqueue	*flowring_wq;
163 
164 	uint16_t		 sc_max_flowrings;
165 	uint16_t		 sc_max_submissionrings;
166 	uint16_t		 sc_max_completionrings;
167 
168 	struct bwfm_pci_msgring	 sc_ctrl_submit;
169 	struct bwfm_pci_msgring	 sc_rxpost_submit;
170 	struct bwfm_pci_msgring	 sc_ctrl_complete;
171 	struct bwfm_pci_msgring	 sc_tx_complete;
172 	struct bwfm_pci_msgring	 sc_rx_complete;
173 	struct bwfm_pci_msgring	*sc_flowrings;
174 
175 	struct bwfm_pci_dmamem	*sc_scratch_buf;
176 	struct bwfm_pci_dmamem	*sc_ringupd_buf;
177 
178 	struct bwfm_pci_dmamem	*sc_ioctl_buf;
179 	int			 sc_ioctl_reqid;
180 	uint32_t		 sc_ioctl_resp_pktid;
181 	uint32_t		 sc_ioctl_resp_ret_len;
182 	uint32_t		 sc_ioctl_resp_status;
183 	int			 sc_ioctl_poll;
184 
185 	struct if_rxring	 sc_ioctl_ring;
186 	struct if_rxring	 sc_event_ring;
187 	struct if_rxring	 sc_rxbuf_ring;
188 
189 	struct bwfm_pci_pkts	 sc_rx_pkts;
190 	struct bwfm_pci_pkts	 sc_tx_pkts;
191 	int			 sc_tx_pkts_full;
192 };
193 
194 struct bwfm_pci_dmamem {
195 	bus_dmamap_t		bdm_map;
196 	bus_dma_segment_t	bdm_seg;
197 	size_t			bdm_size;
198 	char *			bdm_kva;
199 };
200 
201 #define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
202 #define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
203 #define BWFM_PCI_DMA_DVA(_bdm)	(uint64_t)((_bdm)->bdm_map->dm_segs[0].ds_addr)
204 #define BWFM_PCI_DMA_KVA(_bdm)	((_bdm)->bdm_kva)
205 
206 static u_int	 if_rxr_get(struct if_rxring *rxr, unsigned int max);
207 static void	 if_rxr_put(struct if_rxring *rxr, unsigned int n);
208 static void	 if_rxr_init(struct if_rxring *rxr, unsigned int lwm, unsigned int hwm);
209 
210 int		 bwfm_pci_match(device_t parent, cfdata_t match, void *aux);
211 void		 bwfm_pci_attachhook(device_t);
212 void		 bwfm_pci_attach(device_t, device_t, void *);
213 int		 bwfm_pci_detach(device_t, int);
214 
215 int		 bwfm_pci_intr(void *);
216 void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
217 void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
218 int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
219 		    size_t);
220 void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int);
221 
222 struct bwfm_pci_dmamem *
223 		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
224 		    bus_size_t);
225 void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
226 int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
227 		    struct bwfm_pci_pkts *);
228 int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
229 		    struct bwfm_pci_pkts *, struct mbuf **,
230 		    uint32_t *, paddr_t *);
231 struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
232 		    struct bwfm_pci_pkts *, uint32_t);
233 void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
234 		    struct if_rxring *, uint32_t);
235 void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
236 void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
237 int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
238 		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
239 int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
240 		    int, size_t);
241 
242 void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
243 		    struct bwfm_pci_msgring *);
244 void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
245 		    struct bwfm_pci_msgring *);
246 void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
247 		    struct bwfm_pci_msgring *);
248 void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
249 		    struct bwfm_pci_msgring *);
250 void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
251 		    struct bwfm_pci_msgring *);
252 void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
253 		    struct bwfm_pci_msgring *);
254 void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
255 		    struct bwfm_pci_msgring *, int, int *);
256 void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
257 		    struct bwfm_pci_msgring *, int *);
258 void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
259 		    struct bwfm_pci_msgring *, int);
260 void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
261 		    struct bwfm_pci_msgring *);
262 void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
263 		    struct bwfm_pci_msgring *, int);
264 
265 void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
266 		    struct bwfm_pci_msgring *);
267 void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *);
268 
269 uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
270 void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
271 		    uint32_t);
272 int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
273 int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
274 void		 bwfm_pci_buscore_activate(struct bwfm_softc *, const uint32_t);
275 
276 int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
277 		     struct mbuf *);
278 void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
279 		     struct mbuf *);
280 void		 bwfm_pci_flowring_create_cb(struct work *, void *);
281 void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);
282 
283 void		 bwfm_pci_stop(struct bwfm_softc *);
284 int		 bwfm_pci_txcheck(struct bwfm_softc *);
285 int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf **);
286 
287 #ifdef BWFM_DEBUG
288 void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
289 #endif
290 
291 int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
292 		    int, char *, size_t *);
293 int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
294 		    int, char *, size_t);
295 
296 static const struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
297 	.bc_read = bwfm_pci_buscore_read,
298 	.bc_write = bwfm_pci_buscore_write,
299 	.bc_prepare = bwfm_pci_buscore_prepare,
300 	.bc_reset = bwfm_pci_buscore_reset,
301 	.bc_setup = NULL,
302 	.bc_activate = bwfm_pci_buscore_activate,
303 };
304 
305 static const struct bwfm_bus_ops bwfm_pci_bus_ops = {
306 	.bs_init = NULL,
307 	.bs_stop = bwfm_pci_stop,
308 	.bs_txcheck = bwfm_pci_txcheck,
309 	.bs_txdata = bwfm_pci_txdata,
310 	.bs_txctl = NULL,
311 	.bs_rxctl = NULL,
312 };
313 
314 static const struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
315 	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
316 	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
317 };
318 
319 
320 CFATTACH_DECL_NEW(bwfm_pci, sizeof(struct bwfm_pci_softc),
321     bwfm_pci_match, bwfm_pci_attach, bwfm_pci_detach, NULL);
322 
323 static const struct bwfm_firmware_selector bwfm_pci_fwtab[] = {
324 	BWFM_FW_ENTRY(BRCM_CC_43602_CHIP_ID,
325 		      BWFM_FWSEL_ALLREVS, "brcmfmac43602-pcie"),
326 
327 	BWFM_FW_ENTRY(BRCM_CC_43465_CHIP_ID,
328 		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),
329 
330 	BWFM_FW_ENTRY(BRCM_CC_4350_CHIP_ID,
331 		      BWFM_FWSEL_REV_LE(7), "brcmfmac4350c2-pcie"),
332 	BWFM_FW_ENTRY(BRCM_CC_4350_CHIP_ID,
333 		      BWFM_FWSEL_REV_GE(8), "brcmfmac4350-pcie"),
334 
335 	BWFM_FW_ENTRY(BRCM_CC_43525_CHIP_ID,
336 		      BWFM_FWSEL_REV_GE(4), "brcmfmac4365c-pcie"),
337 
338 	BWFM_FW_ENTRY(BRCM_CC_4356_CHIP_ID,
339 		      BWFM_FWSEL_ALLREVS, "brcmfmac4356-pcie"),
340 
341 	BWFM_FW_ENTRY(BRCM_CC_43567_CHIP_ID,
342 		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),
343 	BWFM_FW_ENTRY(BRCM_CC_43569_CHIP_ID,
344 		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),
345 	BWFM_FW_ENTRY(BRCM_CC_43570_CHIP_ID,
346 		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),
347 
348 	BWFM_FW_ENTRY(BRCM_CC_4358_CHIP_ID,
349 		      BWFM_FWSEL_ALLREVS, "brcmfmac4358-pcie"),
350 
351 	BWFM_FW_ENTRY(BRCM_CC_4359_CHIP_ID,
352 		      BWFM_FWSEL_ALLREVS, "brcmfmac4359-pcie"),
353 
354 	BWFM_FW_ENTRY(BRCM_CC_4365_CHIP_ID,
355 		      BWFM_FWSEL_REV_LE(3), "brcmfmac4365b-pcie"),
356 	BWFM_FW_ENTRY(BRCM_CC_4365_CHIP_ID,
357 		      BWFM_FWSEL_REV_GE(4), "brcmfmac4365c-pcie"),
358 
359 	BWFM_FW_ENTRY(BRCM_CC_4366_CHIP_ID,
360 		      BWFM_FWSEL_REV_LE(3), "brcmfmac4366b-pcie"),
361 	BWFM_FW_ENTRY(BRCM_CC_4366_CHIP_ID,
362 		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),
363 	BWFM_FW_ENTRY(BRCM_CC_43664_CHIP_ID,
364 		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),
365 
366 	BWFM_FW_ENTRY(BRCM_CC_4371_CHIP_ID,
367 		      BWFM_FWSEL_ALLREVS, "brcmfmac4371-pcie"),
368 
369 	BWFM_FW_ENTRY_END
370 };
371 
372 static const struct device_compatible_entry compat_data[] = {
373 	{ .id = PCI_ID_CODE(PCI_VENDOR_BROADCOM,
374 		PCI_PRODUCT_BROADCOM_BCM43602), },
375 
376 	{ .id = PCI_ID_CODE(PCI_VENDOR_BROADCOM,
377 		PCI_PRODUCT_BROADCOM_BCM4350), },
378 
379 	PCI_COMPAT_EOL
380 };
381 
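/*
 * Local stand-in for OpenBSD's MCLGETI(): allocate a packet header
 * mbuf with an external buffer of the requested size.  The softc and
 * ifp arguments are unused and only kept for source compatibility.
 */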
382 static struct mbuf *
383 MCLGETI(struct bwfm_pci_softc *sc __unused, int how,
384     struct ifnet *ifp __unused, u_int size)
385 {
386 	struct mbuf *m;
387 
388 	MGETHDR(m, how, MT_DATA);
389 	if (m == NULL)
390 		return NULL;
391 
392 	MEXTMALLOC(m, size, how);
393 	if ((m->m_flags & M_EXT) == 0) {
394 		m_freem(m);
395 		return NULL;
396 	}
397 	return m;
398 }
399 
400 int
401 bwfm_pci_match(device_t parent, cfdata_t match, void *aux)
402 {
403 	struct pci_attach_args *pa = aux;
404 
405 	return pci_compatible_match(pa, compat_data);
406 }
407 
408 void
409 bwfm_pci_attach(device_t parent, device_t self, void *aux)
410 {
411 	struct bwfm_pci_softc *sc = device_private(self);
412 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
413 	const char *intrstr;
414 	char intrbuf[PCI_INTRSTR_LEN];
415 
416 	sc->sc_sc.sc_dev = self;
417 
418 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
419 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
420 	    NULL, &sc->sc_reg_ios)) {
421 		printf(": can't map bar0\n");
422 		return;
423 	}
424 
425 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
426 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
427 	    NULL, &sc->sc_tcm_ios)) {
428 		printf(": can't map bar1\n");
429 		goto bar0;
430 	}
431 
432 	sc->sc_pc = pa->pa_pc;
433 	sc->sc_tag = pa->pa_tag;
434 	sc->sc_id = pa->pa_id;
435 
436 	if (pci_dma64_available(pa))
437 		sc->sc_dmat = pa->pa_dmat64;
438 	else
439 		sc->sc_dmat = pa->pa_dmat;
440 
441 	/* Map and establish the interrupt. */
442 	if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0) != 0) {
443 		printf(": couldn't map interrupt\n");
444 		goto bar1;
445 	}
446 	intrstr = pci_intr_string(pa->pa_pc, sc->sc_pihp[0], intrbuf, sizeof(intrbuf));
447 
448 	sc->sc_ih = pci_intr_establish_xname(pa->pa_pc, sc->sc_pihp[0], IPL_NET,
449 	    bwfm_pci_intr, sc, device_xname(self));
450 	if (sc->sc_ih == NULL) {
451 		printf(": couldn't establish interrupt");
452 		if (intrstr != NULL)
453 			printf(" at %s", intrstr);
454 		printf("\n");
455 		goto bar1;
456 	}
457 	printf(": %s\n", intrstr);
458 
459 	config_mountroot(self, bwfm_pci_attachhook);
460 	return;
461 
462 bar1:
463 	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
464 bar0:
465 	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
466 }
467 
468 void
469 bwfm_pci_attachhook(device_t self)
470 {
471 	struct bwfm_pci_softc *sc = device_private(self);
472 	struct bwfm_softc *bwfm = (void *)sc;
473 	struct bwfm_pci_ringinfo ringinfo;
474 	struct bwfm_firmware_context fwctx;
475 	uint8_t *ucode;
476 	size_t ucsize;
477 	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
478 	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
479 	uint32_t idx_offset, reg;
480 	int i;
481 
482 	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
483 	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
484 		aprint_error_dev(bwfm->sc_dev, "cannot attach chip\n");
485 		return;
486 	}
487 
488 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
489 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
490 	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
491 	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
492 	    BWFM_PCI_PCIE2REG_CONFIGDATA);
493 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
494 	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
495 
496 	bwfm_firmware_context_init(&fwctx,
497 	    bwfm->sc_chip.ch_chip, bwfm->sc_chip.ch_chiprev, NULL,
498 	    BWFM_FWREQ(BWFM_FILETYPE_UCODE));
499 
500 	if (!bwfm_firmware_open(bwfm, bwfm_pci_fwtab, &fwctx)) {
501 		/* Error message already displayed. */
502 		goto err;
503 	}
504 
505 	ucode = bwfm_firmware_data(&fwctx, BWFM_FILETYPE_UCODE, &ucsize);
506 	KASSERT(ucode != NULL);
507 
508 	/* Retrieve RAM size from firmware. */
509 	if (ucsize >= BWFM_RAMSIZE + 8) {
510 		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
511 		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
512 			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
513 	}
514 
515 	if (bwfm_pci_load_microcode(sc, ucode, ucsize) != 0) {
516 		aprint_error_dev(bwfm->sc_dev, "could not load microcode\n");
517 		goto err;
518 	}
519 
520 	bwfm_firmware_close(&fwctx);
521 
522 	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
523 	    sc->sc_shared_address + BWFM_SHARED_INFO);
524 	sc->sc_shared_version = sc->sc_shared_flags;
525 	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
526 	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
527 		aprint_error_dev(bwfm->sc_dev,
528 		    "PCIe version %d unsupported\n", sc->sc_shared_version);
529 		return;
530 	}
531 
532 	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
533 		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
534 			sc->sc_dma_idx_sz = sizeof(uint16_t);
535 		else
536 			sc->sc_dma_idx_sz = sizeof(uint32_t);
537 	}
538 
539 	/* Maximum RX data buffers in the ring. */
540 	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
541 	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
542 	if (sc->sc_max_rxbufpost == 0)
543 		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;
544 
545 	/* Alternative offset of data in a packet */
546 	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
547 	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);
548 
549 	/* For Power Management */
550 	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
551 	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
552 	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
553 	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);
554 
555 	/* Ring information */
556 	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
557 	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);
558 
559 	/* Firmware's "dmesg" */
560 	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
561 	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
562 	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
563 	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
564 	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
565 	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);
566 
567 	/* Read ring information. */
568 	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
569 	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
570 
571 	if (sc->sc_shared_version >= 6) {
572 		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
573 		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
574 		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
575 	} else {
576 		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
577 		sc->sc_max_flowrings = sc->sc_max_submissionrings -
578 		    BWFM_NUM_TX_MSGRINGS;
579 		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
580 	}
581 
582 	if (sc->sc_dma_idx_sz == 0) {
583 		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
584 		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
585 		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
586 		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
587 		idx_offset = sizeof(uint32_t);
588 	} else {
589 		uint64_t address;
590 
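		/*
		 * Layout of the host index buffer, in units of idx_offset
		 * (= sc_dma_idx_sz) bytes per ring:
		 *
		 *   [h2d write indices: sc_max_submissionrings entries]
		 *   [h2d read  indices: sc_max_submissionrings entries]
		 *   [d2h write indices: sc_max_completionrings entries]
		 *   [d2h read  indices: sc_max_completionrings entries]
		 *
		 * The host address of each block is handed to the firmware
		 * through the ringinfo structure below.
		 */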
591 		/* Each TX/RX Ring has a Read and Write Ptr */
592 		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
593 		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
594 		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
595 		    sc->sc_dma_idx_bufsz, 8);
596 		if (sc->sc_dma_idx_buf == NULL) {
597 			/* XXX: Fallback to TCM? */
598 			aprint_error_dev(bwfm->sc_dev,
599 			    "cannot allocate idx buf\n");
600 			return;
601 		}
602 
603 		idx_offset = sc->sc_dma_idx_sz;
604 		h2d_w_idx_ptr = 0;
605 		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
606 		ringinfo.h2d_w_idx_hostaddr_low =
607 		    htole32(address & 0xffffffff);
608 		ringinfo.h2d_w_idx_hostaddr_high =
609 		    htole32(address >> 32);
610 
611 		h2d_r_idx_ptr = h2d_w_idx_ptr +
612 		    sc->sc_max_submissionrings * idx_offset;
613 		address += sc->sc_max_submissionrings * idx_offset;
614 		ringinfo.h2d_r_idx_hostaddr_low =
615 		    htole32(address & 0xffffffff);
616 		ringinfo.h2d_r_idx_hostaddr_high =
617 		    htole32(address >> 32);
618 
619 		d2h_w_idx_ptr = h2d_r_idx_ptr +
620 		    sc->sc_max_submissionrings * idx_offset;
621 		address += sc->sc_max_submissionrings * idx_offset;
622 		ringinfo.d2h_w_idx_hostaddr_low =
623 		    htole32(address & 0xffffffff);
624 		ringinfo.d2h_w_idx_hostaddr_high =
625 		    htole32(address >> 32);
626 
627 		d2h_r_idx_ptr = d2h_w_idx_ptr +
628 		    sc->sc_max_completionrings * idx_offset;
629 		address += sc->sc_max_completionrings * idx_offset;
630 		ringinfo.d2h_r_idx_hostaddr_low =
631 		    htole32(address & 0xffffffff);
632 		ringinfo.d2h_r_idx_hostaddr_high =
633 		    htole32(address >> 32);
634 
635 		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
636 		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
637 	}
638 
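	/*
	 * The five fixed rings set up below have, in TCM order, the
	 * following dimensions (nitem x itemsz): ctrl submit 64 x 40,
	 * rxpost submit 512 x 32, ctrl complete 64 x 24, tx complete
	 * 1024 x 16, rx complete 512 x 32.
	 */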
639 	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
640 	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
641 	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
642 	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
643 	    &ring_mem_ptr))
644 		goto cleanup;
645 	/* TX rxpost ring: Send clean data mbufs for RX */
646 	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
647 	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
648 	    &ring_mem_ptr))
649 		goto cleanup;
650 	/* RX completion rings: recv our filled buffers back */
651 	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
652 	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
653 	    &ring_mem_ptr))
654 		goto cleanup;
655 	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024, 16,
656 	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
657 	    &ring_mem_ptr))
658 		goto cleanup;
659 	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512, 32,
660 	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
661 	    &ring_mem_ptr))
662 		goto cleanup;
663 
664 	/* Dynamic TX rings for actual data */
665 	sc->sc_flowrings = kmem_zalloc(sc->sc_max_flowrings *
666 	    sizeof(struct bwfm_pci_msgring), KM_SLEEP);
667 	for (i = 0; i < sc->sc_max_flowrings; i++) {
668 		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
669 		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
670 		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
671 	}
672 
673 	pool_init(&sc->sc_flowring_pool, sizeof(struct bwfm_cmd_flowring_create),
674 	    0, 0, 0, "bwfmpl", NULL, IPL_NET);
675 
676 	/* Scratch and ring update buffers for firmware */
677 	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
678 	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
679 		goto cleanup;
680 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
681 	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
682 	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
683 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
684 	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
685 	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
686 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
687 	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
688 	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);
689 
690 	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
691 	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
692 		goto cleanup;
693 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
694 	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
695 	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
696 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
697 	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
698 	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
699 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
700 	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
701 	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);
702 
703 	if ((sc->sc_ioctl_buf = bwfm_pci_dmamem_alloc(sc,
704 	    BWFM_DMA_H2D_IOCTL_BUF_LEN, 8)) == NULL)
705 		goto cleanup;
706 
707 	if (workqueue_create(&sc->flowring_wq, "bwfmflow",
708 	    bwfm_pci_flowring_create_cb, sc, PRI_SOFTNET, IPL_NET, 0))
709 		goto cleanup;
710 
711 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
712 	bwfm_pci_intr_enable(sc);
713 
714 	/* Maps RX mbufs to a packet id and back. */
715 	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
716 	sc->sc_rx_pkts.pkts = kmem_zalloc(BWFM_NUM_RX_PKTIDS *
717 	    sizeof(struct bwfm_pci_buf), KM_SLEEP);
718 	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
719 		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
720 		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
721 		    &sc->sc_rx_pkts.pkts[i].bb_map);
722 
723 	/* Maps TX mbufs to a packet id and back. */
724 	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
725 	sc->sc_tx_pkts.pkts = kmem_zalloc(BWFM_NUM_TX_PKTIDS
726 	    * sizeof(struct bwfm_pci_buf), KM_SLEEP);
727 	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
728 		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
729 		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
730 		    &sc->sc_tx_pkts.pkts[i].bb_map);
731 
732 	/*
733 	 * For whatever reason (it could also be a bug somewhere in this
734 	 * driver), the firmware needs a bunch of RX buffers, otherwise
735 	 * it won't send any RX complete messages.  64 buffers don't
736 	 * suffice, but 128 buffers are enough.
737 	 */
738 	if_rxr_init(&sc->sc_rxbuf_ring, 128, sc->sc_max_rxbufpost);
739 	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
740 	if_rxr_init(&sc->sc_event_ring, 8, 8);
741 	bwfm_pci_fill_rx_rings(sc);
742 
743 
744 #ifdef BWFM_DEBUG
745 	sc->sc_console_readidx = 0;
746 	bwfm_pci_debug_console(sc);
747 #endif
748 
749 	sc->sc_ioctl_poll = 1;
750 	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
751 	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
752 	bwfm_attach(&sc->sc_sc);
753 	sc->sc_ioctl_poll = 0;
754 	return;
755 
756 cleanup:
757 	if (sc->flowring_wq != NULL)
758 		workqueue_destroy(sc->flowring_wq);
759 	if (sc->sc_ih != NULL) {
760 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
761 		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
762 	}
763 	if (sc->sc_ioctl_buf)
764 		bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
765 	if (sc->sc_ringupd_buf)
766 		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
767 	if (sc->sc_scratch_buf)
768 		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
769 	if (sc->sc_rx_complete.ring)
770 		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
771 	if (sc->sc_tx_complete.ring)
772 		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
773 	if (sc->sc_ctrl_complete.ring)
774 		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
775 	if (sc->sc_rxpost_submit.ring)
776 		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
777 	if (sc->sc_ctrl_submit.ring)
778 		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
779 	if (sc->sc_dma_idx_buf)
780 		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
781 
782  err:
783 	bwfm_firmware_close(&fwctx);
784 }
785 
786 int
787 bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size)
788 {
789 	struct bwfm_softc *bwfm = (void *)sc;
790 	struct bwfm_core *core;
791 	uint32_t shared;
792 	int i;
793 
794 	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
795 		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
796 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
797 		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
798 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
799 		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
800 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
801 		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
802 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
803 		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
804 	}
805 
806 	for (i = 0; i < size; i++)
807 		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
808 		    bwfm->sc_chip.ch_rambase + i, ucode[i]);
809 
810 	/* Firmware replaces this with a pointer once up. */
811 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
812 	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);
813 
814 	/* TODO: restore NVRAM */
815 
816 	/* Load reset vector from firmware and kickstart core. */
817 	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
818 		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
819 		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
820 	}
821 	bwfm_chip_set_active(bwfm, *(const uint32_t *)ucode);
822 
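	/*
	 * Poll for up to two seconds (40 x 50ms) for the firmware to
	 * replace the zero word at the end of RAM with the address of
	 * its shared info area.
	 */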
823 	for (i = 0; i < 40; i++) {
824 		delay(50 * 1000);
825 		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
826 		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
827 		if (shared)
828 			break;
829 	}
830 	if (!shared) {
831 		printf("%s: firmware did not come up\n", DEVNAME(sc));
832 		return 1;
833 	}
834 
835 	sc->sc_shared_address = shared;
836 	return 0;
837 }
838 
839 int
840 bwfm_pci_detach(device_t self, int flags)
841 {
842 	struct bwfm_pci_softc *sc = device_private(self);
843 
844 	bwfm_detach(&sc->sc_sc, flags);
845 
846 	/* FIXME: free RX buffers */
847 	/* FIXME: free TX buffers */
848 	/* FIXME: free more memory */
849 
850 	kmem_free(sc->sc_flowrings, sc->sc_max_flowrings
851 	    * sizeof(struct bwfm_pci_msgring));
852 	pool_destroy(&sc->sc_flowring_pool);
853 
854 	workqueue_destroy(sc->flowring_wq);
855 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
856 	pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
857 	bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
858 	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
859 	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
860 	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
861 	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
862 	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
863 	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
864 	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
865 	bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
866 	return 0;
867 }
868 
869 /* DMA code */
870 struct bwfm_pci_dmamem *
871 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
872 {
873 	struct bwfm_pci_dmamem *bdm;
874 	int nsegs;
875 
876 	bdm = kmem_zalloc(sizeof(*bdm), KM_SLEEP);
877 	bdm->bdm_size = size;
878 
879 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
880 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
881 		goto bdmfree;
882 
883 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
884 	    &nsegs, BUS_DMA_WAITOK) != 0)
885 		goto destroy;
886 
887 	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
888 	    (void **) &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
889 		goto free;
890 
891 	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
892 	    NULL, BUS_DMA_WAITOK) != 0)
893 		goto unmap;
894 
895 	bzero(bdm->bdm_kva, size);
896 
897 	return (bdm);
898 
899 unmap:
900 	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
901 free:
902 	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
903 destroy:
904 	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
905 bdmfree:
906 	kmem_free(bdm, sizeof(*bdm));
907 
908 	return (NULL);
909 }
910 
911 void
912 bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
913 {
914 	bus_dmamap_unload(sc->sc_dmat, bdm->bdm_map);
915 	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
916 	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
917 	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
918 	kmem_free(bdm, sizeof(*bdm));
919 }
920 
921 /*
922  * We need a simple mapping from packet IDs to mbufs: when a transfer
923  * completes, we only get the ID back and have to look up the mbuf
924  * belonging to it.  An ID is simply the index of an empty slot.
925  */
926 int
927 bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
928 {
929 	int i, idx;
930 
931 	idx = pkts->last + 1;
932 	for (i = 0; i < pkts->npkt; i++) {
933 		if (idx == pkts->npkt)
934 			idx = 0;
935 		if (pkts->pkts[idx].bb_m == NULL)
936 			return 0;
937 		idx++;
938 	}
939 	return ENOBUFS;
940 }
941 
942 int
943 bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
944     struct mbuf **mp, uint32_t *pktid, paddr_t *paddr)
945 {
946 	int i, idx;
947 
948 	idx = pkts->last + 1;
949 	for (i = 0; i < pkts->npkt; i++) {
950 		if (idx == pkts->npkt)
951 			idx = 0;
952 		if (pkts->pkts[idx].bb_m == NULL) {
953 			if (bus_dmamap_load_mbuf(sc->sc_dmat,
954 			    pkts->pkts[idx].bb_map, *mp, BUS_DMA_NOWAIT) != 0) {
955 				/*
956 				 * Didn't fit.  Maybe it has too many
957 				 * segments.  If it has only one
958 				 * segment, fail; otherwise try to
959 				 * compact it into a single mbuf
960 				 * segment.
961 				 */
962 				if ((*mp)->m_next == NULL)
963 					return ENOBUFS;
964 				struct mbuf *m0 = MCLGETI(NULL, M_DONTWAIT,
965 				    NULL, MSGBUF_MAX_PKT_SIZE);
966 				if (m0 == NULL)
967 					return ENOBUFS;
968 				m_copydata(*mp, 0, (*mp)->m_pkthdr.len,
969 				    mtod(m0, void *));
970 				m0->m_pkthdr.len = m0->m_len =
971 				    (*mp)->m_pkthdr.len;
972 				m_freem(*mp);
973 				*mp = m0;
974 				if (bus_dmamap_load_mbuf(sc->sc_dmat,
975 				    pkts->pkts[idx].bb_map, *mp, BUS_DMA_NOWAIT) != 0)
976 					return EFBIG;
977 			}
978 			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
979 			    0, pkts->pkts[idx].bb_map->dm_mapsize,
980 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
981 			pkts->last = idx;
982 			pkts->pkts[idx].bb_m = *mp;
983 			*pktid = idx;
984 			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
985 			return 0;
986 		}
987 		idx++;
988 	}
989 	return ENOBUFS;
990 }
991 
992 struct mbuf *
993 bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
994     uint32_t pktid)
995 {
996 	struct mbuf *m;
997 
998 	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
999 		return NULL;
1000 	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
1001 	    pkts->pkts[pktid].bb_map->dm_mapsize,
1002 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1003 	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
1004 	m = pkts->pkts[pktid].bb_m;
1005 	pkts->pkts[pktid].bb_m = NULL;
1006 	return m;
1007 }
1008 
1009 void
1010 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
1011 {
1012 	bwfm_pci_fill_rx_buf_ring(sc);
1013 	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
1014 	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
1015 	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
1016 	    MSGBUF_TYPE_EVENT_BUF_POST);
1017 }
1018 
1019 void
1020 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
1021     uint32_t msgtype)
1022 {
1023 	struct msgbuf_rx_ioctl_resp_or_event *req;
1024 	struct mbuf *m;
1025 	uint32_t pktid;
1026 	paddr_t paddr;
1027 	int s, slots;
1028 	uint64_t devaddr;
1029 
1030 	s = splnet();
1031 	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
1032 		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
1033 			break;
1034 		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1035 		if (req == NULL)
1036 			break;
1037 		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
1038 		if (m == NULL) {
1039 			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
1040 			break;
1041 		}
1042 		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
1043 		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
1044 			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
1045 			m_freem(m);
1046 			break;
1047 		}
1048 		devaddr = paddr;
1049 		memset(req, 0, sizeof(*req));
1050 		req->msg.msgtype = msgtype;
1051 		req->msg.request_id = htole32(pktid);
1052 		req->host_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
1053 		req->host_buf_addr.high_addr = htole32(devaddr >> 32);
1054 		req->host_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
1055 		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1056 	}
1057 	if_rxr_put(rxring, slots);
1058 	splx(s);
1059 }
1060 
1061 void
1062 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
1063 {
1064 	struct msgbuf_rx_bufpost *req;
1065 	struct mbuf *m;
1066 	uint32_t pktid;
1067 	paddr_t paddr;
1068 	int s, slots;
1069 	uint64_t devaddr;
1070 
1071 	s = splnet();
1072 	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
1073 	    slots > 0; slots--) {
1074 		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
1075 			break;
1076 		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
1077 		if (req == NULL)
1078 			break;
1079 		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
1080 		if (m == NULL) {
1081 			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
1082 			break;
1083 		}
1084 		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
1085 		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
1086 			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
1087 			m_freem(m);
1088 			break;
1089 		}
1090 		devaddr = paddr;
1091 		memset(req, 0, sizeof(*req));
1092 		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
1093 		req->msg.request_id = htole32(pktid);
1094 		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
1095 		req->data_buf_addr.high_addr = htole32(devaddr >> 32);
1096 		req->data_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
1097 		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
1098 	}
1099 	if_rxr_put(&sc->sc_rxbuf_ring, slots);
1100 	splx(s);
1101 }
1102 
1103 int
1104 bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
1105     int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
1106     int idx, uint32_t idx_off, uint32_t *ring_mem)
1107 {
1108 	ring->w_idx_addr = w_idx + idx * idx_off;
1109 	ring->r_idx_addr = r_idx + idx * idx_off;
1110 	ring->nitem = nitem;
1111 	ring->itemsz = itemsz;
1112 	bwfm_pci_ring_write_rptr(sc, ring);
1113 	bwfm_pci_ring_write_wptr(sc, ring);
1114 
1115 	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
1116 	if (ring->ring == NULL)
1117 		return ENOMEM;
1118 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1119 	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
1120 	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
1121 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1122 	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
1123 	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
1124 	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1125 	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
1126 	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1127 	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
1128 	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
1129 	return 0;
1130 }
1131 
1132 int
1133 bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
1134     int nitem, size_t itemsz)
1135 {
1136 	ring->w_ptr = 0;
1137 	ring->r_ptr = 0;
1138 	ring->nitem = nitem;
1139 	ring->itemsz = itemsz;
1140 	bwfm_pci_ring_write_rptr(sc, ring);
1141 	bwfm_pci_ring_write_wptr(sc, ring);
1142 
1143 	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
1144 	if (ring->ring == NULL)
1145 		return ENOMEM;
1146 	return 0;
1147 }
1148 
1149 /* Ring helpers */
1150 void
1151 bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
1152     struct bwfm_pci_msgring *ring)
1153 {
1154 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1155 	    BWFM_PCI_PCIE2REG_H2D_MAILBOX, 1);
1156 }
1157 
1158 void
1159 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
1160     struct bwfm_pci_msgring *ring)
1161 {
1162 	if (sc->sc_dma_idx_sz == 0) {
1163 		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
1164 		    sc->sc_tcm_ioh, ring->r_idx_addr);
1165 	} else {
1166 		bus_dmamap_sync(sc->sc_dmat,
1167 		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
1168 		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1169 		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1170 		    + ring->r_idx_addr);
1171 	}
1172 }
1173 
1174 static u_int
1175 if_rxr_get(struct if_rxring *rxr, unsigned int max)
1176 {
1177 	u_int taken = MIN(max, (rxr->rxr_total - rxr->rxr_inuse));
1178 
1179 	KASSERTMSG(rxr->rxr_inuse + taken <= rxr->rxr_total,
1180 			"rxr->rxr_inuse: %d\n"
1181 			"taken: %d\n"
1182 			"rxr->rxr_total: %d\n",
1183 			rxr->rxr_inuse, taken, rxr->rxr_total);
1184 	rxr->rxr_inuse += taken;
1185 
1186 	return taken;
1187 }
1188 
1189 static void
1190 if_rxr_put(struct if_rxring *rxr, unsigned int n)
1191 {
1192 	KASSERTMSG(rxr->rxr_inuse >= n,
1193 			"rxr->rxr_inuse: %d\n"
1194 			"n: %d\n"
1195 			"rxr->rxr_total: %d\n",
1196 			rxr->rxr_inuse, n, rxr->rxr_total);
1197 
1198 	rxr->rxr_inuse -= n;
1199 }
1200 
1201 static void
1202 if_rxr_init(struct if_rxring *rxr, unsigned int lwm __unused, unsigned int hwm)
1203 {
1204 	(void) lwm;
1205 
1206 	rxr->rxr_total = hwm;
1207 	rxr->rxr_inuse = 0;
1208 }
1209 
1210 void
1211 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
1212     struct bwfm_pci_msgring *ring)
1213 {
1214 	if (sc->sc_dma_idx_sz == 0) {
1215 		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
1216 		    sc->sc_tcm_ioh, ring->w_idx_addr);
1217 	} else {
1218 		bus_dmamap_sync(sc->sc_dmat,
1219 		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
1220 		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1221 		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1222 		    + ring->w_idx_addr);
1223 	}
1224 }
1225 
1226 void
1227 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
1228     struct bwfm_pci_msgring *ring)
1229 {
1230 	if (sc->sc_dma_idx_sz == 0) {
1231 		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1232 		    ring->r_idx_addr, ring->r_ptr);
1233 	} else {
1234 		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1235 		    + ring->r_idx_addr) = ring->r_ptr;
1236 		bus_dmamap_sync(sc->sc_dmat,
1237 		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
1238 		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1239 	}
1240 }
1241 
1242 void
1243 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
1244     struct bwfm_pci_msgring *ring)
1245 {
1246 	if (sc->sc_dma_idx_sz == 0) {
1247 		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1248 		    ring->w_idx_addr, ring->w_ptr);
1249 	} else {
1250 		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1251 		    + ring->w_idx_addr) = ring->w_ptr;
1252 		bus_dmamap_sync(sc->sc_dmat,
1253 		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
1254 		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1255 	}
1256 }
1257 
1258 /*
1259  * Retrieve a free descriptor to put new stuff in, but don't commit
1260  * to it yet, so we can roll back later if an error occurs.
1261  */
1262 void *
1263 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
1264     struct bwfm_pci_msgring *ring)
1265 {
1266 	int available;
1267 	char *ret;
1268 
1269 	bwfm_pci_ring_update_rptr(sc, ring);
1270 
1271 	if (ring->r_ptr > ring->w_ptr)
1272 		available = ring->r_ptr - ring->w_ptr;
1273 	else
1274 		available = ring->r_ptr + (ring->nitem - ring->w_ptr);
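	/*
	 * Example: with nitem = 64, r_ptr = 10 and w_ptr = 60 this gives
	 * available = 10 + (64 - 60) = 14 free slots before the write
	 * pointer would catch up with the read pointer.
	 */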
1275 
1276 	if (available < 1)
1277 		return NULL;
1278 
1279 	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
1280 	ring->w_ptr += 1;
1281 	if (ring->w_ptr == ring->nitem)
1282 		ring->w_ptr = 0;
1283 	return ret;
1284 }
1285 
1286 void *
1287 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
1288     struct bwfm_pci_msgring *ring, int count, int *avail)
1289 {
1290 	int available;
1291 	char *ret;
1292 
1293 	bwfm_pci_ring_update_rptr(sc, ring);
1294 
1295 	if (ring->r_ptr > ring->w_ptr)
1296 		available = ring->r_ptr - ring->w_ptr;
1297 	else
1298 		available = ring->r_ptr + (ring->nitem - ring->w_ptr);
1299 
1300 	if (available < 1)
1301 		return NULL;
1302 
1303 	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
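	/*
	 * Keep one slot free (hence available - 1): a completely full
	 * ring would otherwise be indistinguishable from an empty one,
	 * since w_ptr == r_ptr is read as empty.
	 */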
1304 	*avail = uimin(count, available - 1);
1305 	if (*avail + ring->w_ptr > ring->nitem)
1306 		*avail = ring->nitem - ring->w_ptr;
1307 	ring->w_ptr += *avail;
1308 	if (ring->w_ptr == ring->nitem)
1309 		ring->w_ptr = 0;
1310 	return ret;
1311 }
1312 
1313 /*
1314  * Read number of descriptors available (submitted by the firmware)
1315  * and retrieve pointer to first descriptor.
1316  */
1317 void *
1318 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
1319     struct bwfm_pci_msgring *ring, int *avail)
1320 {
1321 	bwfm_pci_ring_update_wptr(sc, ring);
1322 
1323 	if (ring->w_ptr >= ring->r_ptr)
1324 		*avail = ring->w_ptr - ring->r_ptr;
1325 	else
1326 		*avail = ring->nitem - ring->r_ptr;
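	/*
	 * When the write pointer has wrapped around, only the contiguous
	 * span up to the end of the ring is reported; bwfm_pci_ring_rx()
	 * loops until r_ptr wraps back to zero to pick up the rest.
	 */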
1327 
1328 	if (*avail == 0)
1329 		return NULL;
1330 	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
1331 	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
1332 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1333 	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
1334 }
1335 
1336 /*
1337  * Let firmware know we read N descriptors.
1338  */
1339 void
1340 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
1341     struct bwfm_pci_msgring *ring, int nitem)
1342 {
1343 	ring->r_ptr += nitem;
1344 	if (ring->r_ptr == ring->nitem)
1345 		ring->r_ptr = 0;
1346 	bwfm_pci_ring_write_rptr(sc, ring);
1347 }
1348 
1349 /*
1350  * Let firmware know that we submitted some descriptors.
1351  */
1352 void
1353 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
1354     struct bwfm_pci_msgring *ring)
1355 {
1356 	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
1357 	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
1358 	    BUS_DMASYNC_PREWRITE);
1359 	bwfm_pci_ring_write_wptr(sc, ring);
1360 	bwfm_pci_ring_bell(sc, ring);
1361 }
1362 
1363 /*
1364  * Roll back N descriptors in case we don't actually want
1365  * to commit to them.
1366  */
1367 void
1368 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
1369     struct bwfm_pci_msgring *ring, int nitem)
1370 {
1371 	if (ring->w_ptr == 0)
1372 		ring->w_ptr = ring->nitem - nitem;
1373 	else
1374 		ring->w_ptr -= nitem;
1375 }
1376 
1377 /*
1378  * For each written descriptor on the ring, pass the descriptor to
1379  * a message handler and let the firmware know we handled it.
1380  */
1381 void
1382 bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring)
1383 {
1384 	char *buf;
1385 	int avail, processed;
1386 
1387 again:
1388 	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
1389 	if (buf == NULL)
1390 		return;
1391 
1392 	processed = 0;
1393 	while (avail) {
1394 		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset);
1395 		buf += ring->itemsz;
1396 		processed++;
1397 		if (processed == 48) {
1398 			bwfm_pci_ring_read_commit(sc, ring, processed);
1399 			processed = 0;
1400 		}
1401 		avail--;
1402 	}
1403 	if (processed)
1404 		bwfm_pci_ring_read_commit(sc, ring, processed);
1405 	if (ring->r_ptr == 0)
1406 		goto again;
1407 }
1408 
1409 void
1410 bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf)
1411 {
1412 	struct ifnet *ifp = sc->sc_sc.sc_ic.ic_ifp;
1413 	struct msgbuf_ioctl_resp_hdr *resp;
1414 	struct msgbuf_tx_status *tx;
1415 	struct msgbuf_rx_complete *rx;
1416 	struct msgbuf_rx_event *event;
1417 	struct msgbuf_common_hdr *msg;
1418 	struct msgbuf_flowring_create_resp *fcr;
1419 	struct msgbuf_flowring_delete_resp *fdr;
1420 	struct bwfm_pci_msgring *ring;
1421 	struct mbuf *m;
1422 	int flowid;
1423 
1424 	msg = (struct msgbuf_common_hdr *)buf;
1425 	switch (msg->msgtype)
1426 	{
1427 	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
1428 		fcr = (struct msgbuf_flowring_create_resp *)buf;
1429 		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
1430 		if (flowid < 2)
1431 			break;
1432 		flowid -= 2;
1433 		if (flowid >= sc->sc_max_flowrings)
1434 			break;
1435 		ring = &sc->sc_flowrings[flowid];
1436 		if (ring->status != RING_OPENING)
1437 			break;
1438 		if (fcr->compl_hdr.status) {
1439 			printf("%s: failed to open flowring %d\n",
1440 			    DEVNAME(sc), flowid);
1441 			ring->status = RING_CLOSED;
1442 			m_freem(ring->m);
1443 			ring->m = NULL;
1444 			ifp->if_flags &= ~IFF_OACTIVE;
1445 			ifp->if_start(ifp);
1446 			break;
1447 		}
1448 		ring->status = RING_OPEN;
1449 		if (ring->m != NULL) {
1450 			m = ring->m;
1451 			ring->m = NULL;
1452 			if (bwfm_pci_txdata(&sc->sc_sc, &m))
1453 				m_freem(m);
1454 		}
1455 		ifp->if_flags &= ~IFF_OACTIVE;
1456 		ifp->if_start(ifp);
1457 		break;
1458 	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
1459 		fdr = (struct msgbuf_flowring_delete_resp *)buf;
1460 		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
1461 		if (flowid < 2)
1462 			break;
1463 		flowid -= 2;
1464 		if (flowid >= sc->sc_max_flowrings)
1465 			break;
1466 		ring = &sc->sc_flowrings[flowid];
1467 		if (ring->status != RING_CLOSING)
1468 			break;
1469 		if (fdr->compl_hdr.status) {
1470 			printf("%s: failed to delete flowring %d\n",
1471 			    DEVNAME(sc), flowid);
1472 			break;
1473 		}
1474 		bwfm_pci_dmamem_free(sc, ring->ring);
1475 		ring->status = RING_CLOSED;
1476 		break;
1477 	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
1478 		break;
1479 	case MSGBUF_TYPE_IOCTL_CMPLT:
1480 		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
1481 		sc->sc_ioctl_resp_pktid = letoh32(resp->msg.request_id);
1482 		sc->sc_ioctl_resp_ret_len = letoh16(resp->resp_len);
1483 		sc->sc_ioctl_resp_status = letoh16(resp->compl_hdr.status);
1484 		if_rxr_put(&sc->sc_ioctl_ring, 1);
1485 		bwfm_pci_fill_rx_rings(sc);
1486 		wakeup(&sc->sc_ioctl_buf);
1487 		break;
1488 	case MSGBUF_TYPE_WL_EVENT:
1489 		event = (struct msgbuf_rx_event *)buf;
1490 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1491 		    letoh32(event->msg.request_id));
1492 		if (m == NULL)
1493 			break;
1494 		m_adj(m, sc->sc_rx_dataoffset);
1495 		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
1496 		bwfm_rx(&sc->sc_sc, m);
1497 		if_rxr_put(&sc->sc_event_ring, 1);
1498 		bwfm_pci_fill_rx_rings(sc);
1499 		break;
1500 	case MSGBUF_TYPE_TX_STATUS:
1501 		tx = (struct msgbuf_tx_status *)buf;
1502 		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
1503 		    letoh32(tx->msg.request_id));
1504 		if (m == NULL)
1505 			break;
1506 		m_freem(m);
1507 		if (sc->sc_tx_pkts_full) {
1508 			sc->sc_tx_pkts_full = 0;
1509 			ifp->if_flags &= ~IFF_OACTIVE;
1510 			ifp->if_start(ifp);
1511 		}
1512 		break;
1513 	case MSGBUF_TYPE_RX_CMPLT:
1514 		rx = (struct msgbuf_rx_complete *)buf;
1515 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1516 		    letoh32(rx->msg.request_id));
1517 		if (m == NULL)
1518 			break;
1519 		if (letoh16(rx->data_offset))
1520 			m_adj(m, letoh16(rx->data_offset));
1521 		else if (sc->sc_rx_dataoffset)
1522 			m_adj(m, sc->sc_rx_dataoffset);
1523 		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
1524 		bwfm_rx(&sc->sc_sc, m);
1525 		if_rxr_put(&sc->sc_rxbuf_ring, 1);
1526 		bwfm_pci_fill_rx_rings(sc);
1527 		break;
1528 	default:
1529 		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
1530 		break;
1531 	}
1532 }
1533 
1534 /* Bus core helpers */
1535 void
1536 bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
1537 {
1538 	struct bwfm_softc *bwfm = (void *)sc;
1539 	struct bwfm_core *core;
1540 
1541 	core = bwfm_chip_get_core(bwfm, id);
1542 	if (core == NULL) {
1543 		printf("%s: could not find core to select\n", DEVNAME(sc));
1544 		return;
1545 	}
1546 
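	/*
	 * Point the BAR0 window at the selected core.  The value is read
	 * back and rewritten once, since the window register apparently
	 * does not always latch the first write.
	 */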
1547 	pci_conf_write(sc->sc_pc, sc->sc_tag,
1548 	    BWFM_PCI_BAR0_WINDOW, core->co_base);
1549 	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
1550 	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
1551 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1552 		    BWFM_PCI_BAR0_WINDOW, core->co_base);
1553 }
1554 
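/*
 * Backplane register access through BAR0: the 32-bit backplane address
 * is split into a page, programmed into the BAR0 window register, and
 * an offset within the window.  For example, with a window size of
 * 0x1000, address 0x18003004 would yield page 0x18003000 and offset
 * 0x4 (the real window size is BWFM_PCI_BAR0_REG_SIZE; 0x1000 is only
 * illustrative).
 */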
1555 uint32_t
1556 bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
1557 {
1558 	struct bwfm_pci_softc *sc = (void *)bwfm;
1559 	uint32_t page, offset;
1560 
1561 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1562 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1563 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1564 	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
1565 }
1566 
1567 void
1568 bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
1569 {
1570 	struct bwfm_pci_softc *sc = (void *)bwfm;
1571 	uint32_t page, offset;
1572 
1573 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1574 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1575 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1576 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
1577 }
1578 
1579 int
1580 bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
1581 {
1582 	return 0;
1583 }
1584 
1585 int
1586 bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
1587 {
1588 	struct bwfm_pci_softc *sc = (void *)bwfm;
1589 	struct bwfm_core *core;
1590 	uint32_t reg;
1591 	int i;
1592 
1593 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
1594 	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
1595 	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
1596 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
1597 	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);
1598 
1599 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
1600 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1601 	    BWFM_CHIP_REG_WATCHDOG, 4);
1602 	delay(100 * 1000);
1603 
1604 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
1605 	pci_conf_write(sc->sc_pc, sc->sc_tag,
1606 	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);
1607 
1608 	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
1609 	if (core->co_rev <= 13) {
1610 		uint16_t cfg_offset[] = {
1611 		    BWFM_PCI_CFGREG_STATUS_CMD,
1612 		    BWFM_PCI_CFGREG_PM_CSR,
1613 		    BWFM_PCI_CFGREG_MSI_CAP,
1614 		    BWFM_PCI_CFGREG_MSI_ADDR_L,
1615 		    BWFM_PCI_CFGREG_MSI_ADDR_H,
1616 		    BWFM_PCI_CFGREG_MSI_DATA,
1617 		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
1618 		    BWFM_PCI_CFGREG_RBAR_CTRL,
1619 		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
1620 		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
1621 		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
1622 		};
1623 
1624 		for (i = 0; i < nitems(cfg_offset); i++) {
1625 			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1626 			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
1627 			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1628 			    BWFM_PCI_PCIE2REG_CONFIGDATA);
1629 			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
1630 			    DEVNAME(sc), cfg_offset[i], reg));
1631 			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1632 			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
1633 		}
1634 	}
1635 
1636 	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1637 	    BWFM_PCI_PCIE2REG_MAILBOXINT);
1638 	if (reg != 0xffffffff)
1639 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1640 		    BWFM_PCI_PCIE2REG_MAILBOXINT, reg);
1641 
1642 	return 0;
1643 }
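
/*
 * Reset sequence, in short: ASPM is disabled around the reset so the
 * link does not power-manage itself away, the chipcommon watchdog is
 * armed to reset the backplane (the 100 ms delay lets it fire), and on
 * PCIe core revisions <= 13 the listed config registers are rewritten
 * through the indirect CONFIGADDR/CONFIGDATA pair, apparently because
 * their values do not survive the watchdog reset on those revisions.
 * Finally, any pending MAILBOXINT cause is acknowledged, unless the
 * register reads back as all-ones, i.e. the device has fallen off the
 * bus.
 */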
1644 
1645 void
1646 bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, const uint32_t rstvec)
1647 {
1648 	struct bwfm_pci_softc *sc = (void *)bwfm;
1649 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
1650 }
1651 
1652 static int bwfm_pci_prio2fifo[8] = {
1653 	1, /* best effort */
1654 	0, /* IPTOS_PREC_IMMEDIATE */
1655 	0, /* IPTOS_PREC_PRIORITY */
1656 	1, /* IPTOS_PREC_FLASH */
1657 	2, /* IPTOS_PREC_FLASHOVERRIDE */
1658 	2, /* IPTOS_PREC_CRITIC_ECP */
1659 	3, /* IPTOS_PREC_INTERNETCONTROL */
1660 	3, /* IPTOS_PREC_NETCONTROL */
1661 };
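
/*
 * The table above maps an 802.1D priority (0-7) to one of four
 * hardware FIFOs, with the usual WME twist that best effort (0)
 * outranks the background priorities (1-2).  For example, a frame with
 * priority 6 (IPTOS_PREC_INTERNETCONTROL) is queued on FIFO 3.
 */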
1662 
1663 int
1664 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
1665 {
1666 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1667 	uint8_t *da = mtod(m, uint8_t *);
1668 	struct ether_header *eh;
1669 	int flowid, prio, fifo;
1670 	int i, found, ac;
1671 
1672 	/* No QoS for EAPOL frames. */
1673 	eh = mtod(m, struct ether_header *);
1674 	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1675 	    M_WME_GETAC(m) : WME_AC_BE;
1676 
1677 	prio = ac;
1678 	fifo = bwfm_pci_prio2fifo[prio];
1679 
1680 	switch (ic->ic_opmode)
1681 	{
1682 	case IEEE80211_M_STA:
1683 		flowid = fifo;
1684 		break;
1685 #ifndef IEEE80211_STA_ONLY
1686 	case IEEE80211_M_HOSTAP:
1687 		if (ETHER_IS_MULTICAST(da))
1688 			da = __UNCONST(etherbroadcastaddr);
1689 		flowid = da[5] * 2 + fifo;
1690 		break;
1691 #endif
1692 	default:
1693 		printf("%s: state not supported\n", DEVNAME(sc));
1694 		return ENOBUFS;
1695 	}
1696 
1697 	found = 0;
1698 	flowid = flowid % sc->sc_max_flowrings;
1699 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1700 		if (ic->ic_opmode == IEEE80211_M_STA &&
1701 		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1702 		    sc->sc_flowrings[flowid].fifo == fifo) {
1703 			found = 1;
1704 			break;
1705 		}
1706 #ifndef IEEE80211_STA_ONLY
1707 		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
1708 		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1709 		    sc->sc_flowrings[flowid].fifo == fifo &&
1710 		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
1711 			found = 1;
1712 			break;
1713 		}
1714 #endif
1715 		flowid = (flowid + 1) % sc->sc_max_flowrings;
1716 	}
1717 
1718 	if (found)
1719 		return flowid;
1720 
1721 	return -1;
1722 }
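
/*
 * Lookup strategy: the frame is hashed to a preferred slot (for
 * HOSTAP, low destination MAC byte * 2 + fifo; for STA just the fifo)
 * and the table is then linearly probed modulo sc_max_flowrings for an
 * open ring with a matching fifo and, in HOSTAP mode, a matching
 * destination MAC.  A return value of -1 tells the caller that no
 * usable ring exists yet, which triggers the asynchronous create path
 * below.
 */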
1723 
1724 void
1725 bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
1726 {
1727 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1728 	struct bwfm_cmd_flowring_create *cmd;
1729 	uint8_t *da = mtod(m, uint8_t *);
1730 	struct ether_header *eh;
1731 	struct bwfm_pci_msgring *ring;
1732 	int flowid, prio, fifo;
1733 	int i, found, ac;
1734 
1735 	cmd = pool_get(&sc->sc_flowring_pool, PR_NOWAIT);
1736 	if (__predict_false(cmd == NULL))
1737 		return;
1738 
1739 	/* No QoS for EAPOL frames. */
1740 	eh = mtod(m, struct ether_header *);
1741 	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1742 	    M_WME_GETAC(m) : WME_AC_BE;
1743 
1744 	prio = ac;
1745 	fifo = bwfm_pci_prio2fifo[prio];
1746 
1747 	switch (ic->ic_opmode)
1748 	{
1749 	case IEEE80211_M_STA:
1750 		flowid = fifo;
1751 		break;
1752 #ifndef IEEE80211_STA_ONLY
1753 	case IEEE80211_M_HOSTAP:
1754 		if (ETHER_IS_MULTICAST(da))
1755 			da = __UNCONST(etherbroadcastaddr);
1756 		flowid = da[5] * 2 + fifo;
1757 		break;
1758 #endif
1759 	default:
1760 		printf("%s: state not supported\n", DEVNAME(sc));
1761 		return;
1762 	}
1763 
1764 	found = 0;
1765 	flowid = flowid % sc->sc_max_flowrings;
1766 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1767 		ring = &sc->sc_flowrings[flowid];
1768 		if (ring->status == RING_CLOSED) {
1769 			ring->status = RING_OPENING;
1770 			found = 1;
1771 			break;
1772 		}
1773 		flowid = (flowid + 1) % sc->sc_max_flowrings;
1774 	}
1775 
1776 	/*
1777 	 * We cannot recover from that so far.  Only a stop/init
1778 	 * cycle can revive this if it ever happens at all.
1779 	 */
1780 	if (!found) {
1781 		printf("%s: no flowring available\n", DEVNAME(sc));
1782 		return;
1783 	}
1784 
1785 	cmd->sc = sc;
1786 	cmd->m = m;
1787 	cmd->prio = prio;
1788 	cmd->flowid = flowid;
1789 	workqueue_enqueue(sc->flowring_wq, &cmd->wq_cookie, NULL);
1790 }
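
/*
 * Creating a ring allocates DMA memory and must be able to sleep, so
 * it cannot run from the transmit path.  The function above therefore
 * only claims a slot, marking it RING_OPENING synchronously so a
 * second packet cannot claim the same slot, and defers the actual
 * setup to flowring_wq; pool_get(PR_NOWAIT) keeps the hot path free of
 * sleeps.  The worker recovers the command from the struct work via
 * container_of(), as seen in the callback below.
 */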
1791 
1792 void
1793 bwfm_pci_flowring_create_cb(struct work *wk, void *arg)
1794 {
1795 	struct bwfm_cmd_flowring_create *cmd = container_of(wk, struct bwfm_cmd_flowring_create, wq_cookie);
1796 	struct bwfm_pci_softc *sc = cmd->sc;
1797 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1798 	struct msgbuf_tx_flowring_create_req *req;
1799 	struct bwfm_pci_msgring *ring;
1800 	uint8_t *da, *sa;
1801 
1802 	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
1803 	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;
1804 
1805 	ring = &sc->sc_flowrings[cmd->flowid];
1806 	if (ring->status != RING_OPENING) {
1807 		printf("%s: flowring not opening\n", DEVNAME(sc));
1808 		return;
1809 	}
1810 
1811 	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
1812 		printf("%s: cannot setup flowring\n", DEVNAME(sc));
1813 		return;
1814 	}
1815 
1816 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1817 	if (req == NULL) {
1818 		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1819 		return;
1820 	}
1821 
1822 	ring->status = RING_OPENING;
1823 	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
1824 	ring->m = cmd->m;
1825 	memcpy(ring->mac, da, ETHER_ADDR_LEN);
1826 #ifndef IEEE80211_STA_ONLY
1827 	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
1828 		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
1829 #endif
1830 
1831 	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
1832 	req->msg.ifidx = 0;
1833 	req->msg.request_id = 0;
1834 	req->tid = bwfm_pci_prio2fifo[cmd->prio];
1835 	req->flow_ring_id = letoh16(cmd->flowid + 2);
1836 	memcpy(req->da, da, ETHER_ADDR_LEN);
1837 	memcpy(req->sa, sa, ETHER_ADDR_LEN);
1838 	req->flow_ring_addr.high_addr =
1839 	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
1840 	req->flow_ring_addr.low_addr =
1841 	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
1842 	req->max_items = letoh16(512);
1843 	req->len_item = letoh16(48);
1844 
1845 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1846 	pool_put(&sc->sc_flowring_pool, cmd);
1847 }
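
/*
 * Note the "+ 2" in flow_ring_id: the two host-to-device common rings
 * (control submit and RX post) occupy firmware ring IDs 0 and 1, so
 * host flowring N is announced as ring N + 2.  The ring's DMA address
 * is handed over as a split 64-bit value,
 *
 *	high_addr = DVA >> 32
 *	low_addr  = DVA & 0xffffffff
 *
 * and max_items/len_item mirror the 512 x 48 byte geometry passed to
 * bwfm_pci_setup_flowring() above.
 */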
1848 
1849 void
1850 bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
1851 {
1852 	struct msgbuf_tx_flowring_delete_req *req;
1853 	struct bwfm_pci_msgring *ring;
1854 
1855 	ring = &sc->sc_flowrings[flowid];
1856 	if (ring->status != RING_OPEN) {
1857 		printf("%s: flowring not open\n", DEVNAME(sc));
1858 		return;
1859 	}
1860 
1861 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1862 	if (req == NULL) {
1863 		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1864 		return;
1865 	}
1866 
1867 	ring->status = RING_CLOSING;
1868 
1869 	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1870 	req->msg.ifidx = 0;
1871 	req->msg.request_id = 0;
1872 	req->flow_ring_id = letoh16(flowid + 2);
1873 	req->reason = 0;
1874 
1875 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1876 }
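
/*
 * Deletion mirrors creation: the ring is marked RING_CLOSING here and
 * is expected to return to RING_CLOSED only once the firmware answers
 * with a flowring-delete completion on the control complete ring, at
 * which point the slot can be reused.
 */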
1877 
1878 void
1879 bwfm_pci_stop(struct bwfm_softc *bwfm)
1880 {
1881 	struct bwfm_pci_softc *sc = (void *)bwfm;
1882 	struct bwfm_pci_msgring *ring;
1883 	int i;
1884 
1885 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1886 		ring = &sc->sc_flowrings[i];
1887 		if (ring->status == RING_OPEN)
1888 			bwfm_pci_flowring_delete(sc, i);
1889 	}
1890 }
1891 
1892 int
1893 bwfm_pci_txcheck(struct bwfm_softc *bwfm)
1894 {
1895 	struct bwfm_pci_softc *sc = (void *)bwfm;
1896 	struct bwfm_pci_msgring *ring;
1897 	int i;
1898 
1899 	/* If we are transitioning, we cannot send. */
1900 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1901 		ring = &sc->sc_flowrings[i];
1902 		if (ring->status == RING_OPENING)
1903 			return ENOBUFS;
1904 	}
1905 
1906 	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
1907 		sc->sc_tx_pkts_full = 1;
1908 		return ENOBUFS;
1909 	}
1910 
1911 	return 0;
1912 }
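
/*
 * This is the driver's backpressure point: ENOBUFS is returned both
 * while any flowring is still transitioning and when the TX pktid
 * table is exhausted.  In the latter case sc_tx_pkts_full is set so
 * that the next MSGBUF_TYPE_TX_STATUS completion clears IFF_OACTIVE
 * and restarts the interface queue (see the TX status handler above).
 */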
1913 
1914 int
1915 bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf **mp)
1916 {
1917 	struct bwfm_pci_softc *sc = (void *)bwfm;
1918 	struct bwfm_pci_msgring *ring;
1919 	struct msgbuf_tx_msghdr *tx;
1920 	uint32_t pktid;
1921 	paddr_t paddr;
1922 	uint64_t devaddr;
1923 	struct ether_header *eh;
1924 	int flowid, ret, ac;
1925 
1926 	flowid = bwfm_pci_flowring_lookup(sc, *mp);
1927 	if (flowid < 0) {
1928 		/*
1929 		 * We cannot send the packet right now as there is
1930 		 * no flowring yet.  The flowring will be created
1931 		 * asynchronously.  While the ring is transitioning
1932 		 * the TX check will tell the upper layers that we
1933 		 * cannot send packets right now.  When the flowring
1934 		 * is created the queue will be restarted and this
1935 		 * mbuf will be transmitted.
1936 		 */
1937 		bwfm_pci_flowring_create(sc, *mp);
1938 		return 0;
1939 	}
1940 
1941 	ring = &sc->sc_flowrings[flowid];
1942 	if (ring->status == RING_OPENING ||
1943 	    ring->status == RING_CLOSING) {
1944 		printf("%s: tried to use a flow that was "
1945 		    "transitioning in status %d\n",
1946 		    DEVNAME(sc), ring->status);
1947 		return ENOBUFS;
1948 	}
1949 
1950 	tx = bwfm_pci_ring_write_reserve(sc, ring);
1951 	if (tx == NULL)
1952 		return ENOBUFS;
1953 
1954 	/* No QoS for EAPOL frames. */
1955 	eh = mtod(*mp, struct ether_header *);
1956 	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1957 	    M_WME_GETAC(*mp) : WME_AC_BE;
1958 
1959 	memset(tx, 0, sizeof(*tx));
1960 	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
1961 	tx->msg.ifidx = 0;
1962 	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
1963 	tx->flags |= ac << BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
1964 	tx->seg_cnt = 1;
1965 	memcpy(tx->txhdr, mtod(*mp, char *), ETHER_HDR_LEN);
1966 
1967 	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, mp, &pktid, &paddr);
1968 	if (ret) {
1969 		if (ret == ENOBUFS) {
1970 			printf("%s: no pktid available for TX\n",
1971 			    DEVNAME(sc));
1972 			sc->sc_tx_pkts_full = 1;
1973 		}
1974 		bwfm_pci_ring_write_cancel(sc, ring, 1);
1975 		return ret;
1976 	}
1977 	devaddr = paddr + ETHER_HDR_LEN;
1978 
1979 	tx->msg.request_id = htole32(pktid);
1980 	tx->data_len = htole16((*mp)->m_len - ETHER_HDR_LEN);
1981 	tx->data_buf_addr.high_addr = htole32(devaddr >> 32);
1982 	tx->data_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
1983 
1984 	bwfm_pci_ring_write_commit(sc, ring);
1985 	return 0;
1986 }
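
/*
 * The TX post message carries the Ethernet header inline in txhdr
 * while only the payload is DMA'd from the mbuf, which is why both the
 * device address and the length are offset by ETHER_HDR_LEN:
 *
 *	devaddr  = paddr + ETHER_HDR_LEN
 *	data_len = (*mp)->m_len - ETHER_HDR_LEN
 *
 * The pktid travels to the firmware as request_id and returns in the
 * TX status message, where it is used to look up and free the mbuf.
 */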
1987 
1988 #ifdef BWFM_DEBUG
1989 void
1990 bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
1991 {
1992 	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1993 	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);
1994 
1995 	if (newidx != sc->sc_console_readidx)
1996 		DPRINTFN(3, ("BWFM CONSOLE: "));
1997 	while (newidx != sc->sc_console_readidx) {
1998 		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1999 		    sc->sc_console_buf_addr + sc->sc_console_readidx);
2000 		sc->sc_console_readidx++;
2001 		if (sc->sc_console_readidx == sc->sc_console_buf_size)
2002 			sc->sc_console_readidx = 0;
2003 		if (ch == '\r')
2004 			continue;
2005 		DPRINTFN(3, ("%c", ch));
2006 	}
2007 }
2008 #endif
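
/*
 * The firmware console is a plain ring buffer in TCM: a write index at
 * BWFM_CONSOLE_WRITEIDX and sc_console_buf_size bytes of text.  The
 * reader above chases the write index with its own read index,
 * wrapping at the buffer size and dropping '\r' characters so the
 * output prints cleanly through DPRINTFN().
 */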
2009 
2010 int
2011 bwfm_pci_intr(void *v)
2012 {
2013 	struct bwfm_pci_softc *sc = (void *)v;
2014 	uint32_t status;
2015 
2016 	if ((status = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2017 	    BWFM_PCI_PCIE2REG_MAILBOXINT)) == 0)
2018 		return 0;
2019 
2020 	bwfm_pci_intr_disable(sc);
2021 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2022 	    BWFM_PCI_PCIE2REG_MAILBOXINT, status);
2023 
2024 	if (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2025 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1))
2026 		printf("%s: handle MB data\n", __func__);
2027 
2028 	if (status & BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB) {
2029 		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete);
2030 		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete);
2031 		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete);
2032 	}
2033 
2034 #ifdef BWFM_DEBUG
2035 	bwfm_pci_debug_console(sc);
2036 #endif
2037 
2038 	bwfm_pci_intr_enable(sc);
2039 	return 1;
2040 }
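
/*
 * Interrupt flow: read MAILBOXINT and bail out if no cause is set
 * (shared interrupt line), then mask, acknowledge, service the three
 * device-to-host completion rings on a doorbell cause, and unmask
 * again.  Mailbox data causes (FN0_0/FN0_1) are only reported with a
 * printf; device-to-host mailbox messages are not handled here.
 */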
2041 
2042 void
2043 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
2044 {
2045 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2046 	    BWFM_PCI_PCIE2REG_MAILBOXMASK,
2047 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2048 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
2049 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2050 }
2051 
2052 void
2053 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
2054 {
2055 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2056 	    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
2057 }
2058 
2059 /* Msgbuf protocol implementation */
2060 int
2061 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
2062     int cmd, char *buf, size_t *len)
2063 {
2064 	struct bwfm_pci_softc *sc = (void *)bwfm;
2065 	struct msgbuf_ioctl_req_hdr *req;
2066 	struct mbuf *m;
2067 	size_t buflen;
2068 	int s;
2069 
2070 	s = splnet();
2071 	sc->sc_ioctl_resp_pktid = -1;
2072 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
2073 	if (req == NULL) {
2074 		printf("%s: cannot reserve for write\n", DEVNAME(sc));
2075 		splx(s);
2076 		return 1;
2077 	}
2078 	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
2079 	req->msg.ifidx = 0;
2080 	req->msg.flags = 0;
2081 	req->msg.request_id = htole32(MSGBUF_IOCTL_REQ_PKTID);
2082 	req->cmd = htole32(cmd);
2083 	req->output_buf_len = htole16(*len);
2084 	req->trans_id = htole16(sc->sc_ioctl_reqid++);
2085 
2086 	buflen = uimin(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
2087 	req->input_buf_len = htole16(buflen);
2088 	req->req_buf_addr.high_addr =
2089 	    htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) >> 32);
2090 	req->req_buf_addr.low_addr =
2091 	    htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) & 0xffffffff);
2092 	if (buf)
2093 		memcpy(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), buf, buflen);
2094 	else
2095 		memset(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), 0, buflen);
2096 
2097 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
2098 	splx(s);
2099 
2100 	if (tsleep(&sc->sc_ioctl_buf, PCATCH, "bwfm", hz)) {
2101 		printf("%s: timeout waiting for ioctl response\n",
2102 		    DEVNAME(sc));
2103 		return 1;
2104 	}
2105 
2106 	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts, sc->sc_ioctl_resp_pktid);
2107 	if (m == NULL)
2108 		return 1;
2109 
2110 	*len = uimin(buflen, sc->sc_ioctl_resp_ret_len);
2111 	if (buf)
2112 		memcpy(buf, mtod(m, char *), *len);
2113 	m_freem(m);
2115 
2116 	return 0;
2117 }
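
/*
 * Ioctl round trip, in short: the request is posted on the control
 * submit ring with the preallocated sc_ioctl_buf as payload buffer
 * (capped at BWFM_DMA_H2D_IOCTL_BUF_LEN).  The completion handler is
 * expected to record the response pktid in sc_ioctl_resp_pktid and
 * wake the tsleep() above; the response mbuf is then looked up in the
 * RX pktid table, copied out and freed.  The wait is bounded by a
 * one-second timeout (hz).
 */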
2118 
2119 int
2120 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
2121     int cmd, char *buf, size_t len)
2122 {
2123 	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
2124 }
2125