xref: /openbsd-src/sys/dev/pci/if_myx.c (revision d59bb9942320b767f2a19aaa7690c8c6e30b724c)
1 /*	$OpenBSD: if_myx.c,v 1.102 2017/02/07 06:51:58 dlg Exp $	*/
2 
3 /*
4  * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21  */
22 
23 #include "bpfilter.h"
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/sockio.h>
28 #include <sys/mbuf.h>
29 #include <sys/kernel.h>
30 #include <sys/socket.h>
31 #include <sys/malloc.h>
32 #include <sys/pool.h>
33 #include <sys/timeout.h>
34 #include <sys/device.h>
35 #include <sys/proc.h>
36 #include <sys/queue.h>
37 
38 #include <machine/bus.h>
39 #include <machine/intr.h>
40 
41 #include <net/if.h>
42 #include <net/if_dl.h>
43 #include <net/if_media.h>
44 
45 #if NBPFILTER > 0
46 #include <net/bpf.h>
47 #endif
48 
49 #include <netinet/in.h>
50 #include <netinet/if_ether.h>
51 
52 #include <dev/pci/pcireg.h>
53 #include <dev/pci/pcivar.h>
54 #include <dev/pci/pcidevs.h>
55 
56 #include <dev/pci/if_myxreg.h>
57 
58 #ifdef MYX_DEBUG
59 #define MYXDBG_INIT	(1<<0)	/* chipset initialization */
60 #define MYXDBG_CMD	(2<<0)	/* commands */
61 #define MYXDBG_INTR	(3<<0)	/* interrupts */
62 #define MYXDBG_ALL	0xffff	/* enable all debugging messages */
63 int myx_debug = MYXDBG_ALL;
64 #define DPRINTF(_lvl, _arg...)	do {					\
65 	if (myx_debug & (_lvl))						\
66 		printf(_arg);						\
67 } while (0)
68 #else
69 #define DPRINTF(_lvl, arg...)
70 #endif
71 
72 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
73 
/*
 * One contiguous DMA allocation: the dmamap, its single backing
 * segment, and the kernel virtual mapping of the memory.
 */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;	/* load/unload handle */
	bus_dma_segment_t	 mxm_seg;	/* physical segment */
	int			 mxm_nsegs;	/* always 1 (see myx_dmamem_alloc) */
	size_t			 mxm_size;	/* allocation size in bytes */
	caddr_t			 mxm_kva;	/* kernel virtual address */
};
81 
/* mbuf cluster pool shared by all myx(4) instances; created at first attach */
struct pool *myx_mcl_pool;

/* one descriptor-ring slot: the DMA map and the mbuf currently loaded in it */
struct myx_slot {
	bus_dmamap_t		 ms_map;
	struct mbuf		*ms_m;
};
88 
/*
 * Per-ring receive state.  Each interface has two of these (small and
 * big clusters); mrr_mclget supplies clusters of the right size when
 * the ring is refilled.
 */
struct myx_rx_ring {
	struct myx_softc	*mrr_softc;	/* backpointer for the timeout */
	struct timeout		 mrr_refill;	/* retry refill on cluster shortage */
	struct if_rxring	 mrr_rxr;	/* rx ring accounting */
	struct myx_slot		*mrr_slots;	/* one slot per ring descriptor */
	u_int32_t		 mrr_offset;	/* ring offset in chip memory */
	u_int			 mrr_running;
	u_int			 mrr_prod;	/* producer index */
	u_int			 mrr_cons;	/* consumer index */
	struct mbuf		*(*mrr_mclget)(void);	/* cluster allocator */
};
100 
/*
 * Interface lifecycle; stored volatile in the softc, so presumably
 * consulted from interrupt context — see sc_state.
 */
enum myx_state {
	MYX_S_OFF = 0,		/* not brought up */
	MYX_S_RUNNING,		/* up and passing traffic */
	MYX_S_DOWN		/* tearing down */
};
106 
/* per-device state */
struct myx_softc {
	struct device		 sc_dev;	/* must be first: autoconf glue */
	struct arpcom		 sc_ac;		/* ethernet state + ifnet */

	/* PCI attachment */
	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	/* bus resources: BAR0 register/SRAM window */
	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;	/* size of mapped window; 0 if unmapped */

	struct myx_dmamem	 sc_zerodma;	/* zero pad source */
	struct myx_dmamem	 sc_cmddma;	/* command/response buffer */
	struct myx_dmamem	 sc_paddma;	/* dummy RDMA target */

	/* status block the chip DMAs link/tx state into */
	struct myx_dmamem	 sc_sts_dma;
	volatile struct myx_status	*sc_sts;

	int			 sc_intx;	/* 1 if using INTx, 0 if MSI */
	void			*sc_irqh;	/* established interrupt handle */
	/* chip-memory offsets for interrupt handshaking, queried at up time */
	u_int32_t		 sc_irqcoaloff;
	u_int32_t		 sc_irqclaimoff;
	u_int32_t		 sc_irqdeassertoff;

	/* interrupt (completion) queue written by the chip */
	struct myx_dmamem	 sc_intrq_dma;
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	u_int			 sc_rx_ring_count;
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct myx_rx_ring	 sc_rx_ring[2];

	/* tx segments may not cross this boundary (4096 or 2048, fw dependent) */
	bus_size_t		 sc_tx_boundary;
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;	/* tx ring offset in chip memory */
	u_int			 sc_tx_nsegs;	/* max DMA segments per packet */
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_ring_prod;
	u_int			 sc_tx_ring_cons;

	u_int			 sc_tx_prod;
	u_int			 sc_tx_cons;
	struct myx_slot		*sc_tx_slots;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;
	volatile u_int8_t	 sc_linkdown;
};
160 
161 #define MYX_RXSMALL_SIZE	MCLBYTES
162 #define MYX_RXBIG_SIZE		(MYX_MTU - \
163     (ETHER_ALIGN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
164 
165 int	 myx_match(struct device *, void *, void *);
166 void	 myx_attach(struct device *, struct device *, void *);
167 int	 myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
168 int	 myx_query(struct myx_softc *sc, char *, size_t);
169 u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
170 void	 myx_attachhook(struct device *);
171 int	 myx_loadfirmware(struct myx_softc *, const char *);
172 int	 myx_probe_firmware(struct myx_softc *);
173 
174 void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
175 void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
176 
177 #if defined(__LP64__)
178 #define _myx_bus_space_write bus_space_write_raw_region_8
179 typedef u_int64_t myx_bus_t;
180 #else
181 #define _myx_bus_space_write bus_space_write_raw_region_4
182 typedef u_int32_t myx_bus_t;
183 #endif
184 #define myx_bus_space_write(_sc, _o, _a, _l) \
185     _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l))
186 
187 int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
188 int	 myx_boot(struct myx_softc *, u_int32_t);
189 
190 int	 myx_rdma(struct myx_softc *, u_int);
191 int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
192 	    bus_size_t, u_int align);
193 void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
194 int	 myx_media_change(struct ifnet *);
195 void	 myx_media_status(struct ifnet *, struct ifmediareq *);
196 void	 myx_link_state(struct myx_softc *, u_int32_t);
197 void	 myx_watchdog(struct ifnet *);
198 int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
199 int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
200 void	 myx_up(struct myx_softc *);
201 void	 myx_iff(struct myx_softc *);
202 void	 myx_down(struct myx_softc *);
203 
204 void	 myx_start(struct ifqueue *);
205 void	 myx_write_txd_tail(struct myx_softc *, struct myx_slot *, u_int8_t,
206 	    u_int32_t, u_int);
207 int	 myx_load_mbuf(struct myx_softc *, struct myx_slot *, struct mbuf *);
208 int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
209 int	 myx_intr(void *);
210 void	 myx_rxeof(struct myx_softc *);
211 void	 myx_txeof(struct myx_softc *, u_int32_t);
212 
213 int			myx_buf_fill(struct myx_softc *, struct myx_slot *,
214 			    struct mbuf *(*)(void));
215 struct mbuf *		myx_mcl_small(void);
216 struct mbuf *		myx_mcl_big(void);
217 
218 int			myx_rx_init(struct myx_softc *, int, bus_size_t);
219 int			myx_rx_fill(struct myx_softc *, struct myx_rx_ring *);
220 void			myx_rx_empty(struct myx_softc *, struct myx_rx_ring *);
221 void			myx_rx_free(struct myx_softc *, struct myx_rx_ring *);
222 
223 int			myx_tx_init(struct myx_softc *, bus_size_t);
224 void			myx_tx_empty(struct myx_softc *);
225 void			myx_tx_free(struct myx_softc *);
226 
227 void			myx_refill(void *);
228 
/* autoconf(9) glue */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

/* PCI IDs of the supported Myri-10G variants */
const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};
240 
241 int
242 myx_match(struct device *parent, void *match, void *aux)
243 {
244 	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
245 }
246 
/*
 * Map the device, read its MAC address and part number from the
 * firmware string specs, and hook up the interrupt (MSI preferred,
 * INTx fallback).  The rest of the bring-up is deferred with
 * config_mountroot() to myx_attachhook(), because the firmware is
 * loaded from the filesystem via loadfirmware(9).
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* wire up both rx rings with their cluster allocators and timeouts */
	sc->sc_rx_ring[MYX_RXSMALL].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXSMALL].mrr_mclget = myx_mcl_small;
	timeout_set(&sc->sc_rx_ring[MYX_RXSMALL].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXSMALL]);
	sc->sc_rx_ring[MYX_RXBIG].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXBIG].mrr_mclget = myx_mcl_big;
	timeout_set(&sc->sc_rx_ring[MYX_RXBIG].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXBIG]);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt: MSI first, legacy INTx as fallback */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* this is sort of racy */
	if (myx_mcl_pool == NULL) {
		/* M_WAITOK cannot fail, so the NULL check is belt-and-braces */
		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
		    M_WAITOK);
		if (myx_mcl_pool == NULL) {
			printf("%s: unable to allocate mcl pool\n",
			    DEVNAME(sc));
			goto unmap;
		}

		m_pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY,
		    "myxmcl");
	}

	/* non-fatal: the chip still works with default PCIe settings */
	if (myx_pcie_dc(sc, pa) != 0)
		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));

	config_mountroot(self, myx_attachhook);

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
320 
/*
 * Program the PCIe Device Control register: a 4096-byte size code
 * (fls(4096) - 8 == 5, i.e. 128 << 5 bytes, shifted into the
 * PCI_PCIE_DCSR_MPS field) plus relaxed ordering (ERO).  Returns -1
 * if the device exposes no PCIe capability, 0 otherwise.
 *
 * NOTE(review): the << 12 shift targets DCSR bits 14:12; confirm
 * PCI_PCIE_DCSR_MPS in pcireg.h covers that field before relying on
 * the "payload size" reading of this code.
 */
int
myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t dcsr;
	pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
	pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
	int reg;

	if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &reg, NULL) == 0)
		return (-1);

	reg += PCI_PCIE_DCSR;
	dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
	/* only write the register if it is not already configured */
	if ((dcsr & mask) != dc) {
		CLR(dcsr, mask);
		SET(dcsr, dc);
		pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
	}

	return (0);
}
343 
/*
 * Parse up to maxlen characters of a MAC address string into the
 * 6-byte lladdr buffer.  Non-hex characters (e.g. ':' separators) are
 * skipped.  Returns the number of characters consumed so the caller
 * can continue scanning after the address.
 *
 * Fixes over the previous revision:
 *  - the loop tested mac[i] before i < maxlen, reading one byte past
 *    the buffer when the string is not NUL-terminated within maxlen;
 *  - the nibble store was unbounded, so more than 12 hex digits would
 *    write past the ETHER_ADDR_LEN-byte destination.  Excess digits
 *    are now consumed but discarded.
 */
u_int
myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
{
	u_int		i, j;
	u_int8_t	digit;

	memset(lladdr, 0, ETHER_ADDR_LEN);
	for (i = j = 0; i < maxlen && mac[i] != '\0'; i++) {
		if (mac[i] >= '0' && mac[i] <= '9')
			digit = mac[i] - '0';
		else if (mac[i] >= 'A' && mac[i] <= 'F')
			digit = mac[i] - 'A' + 10;
		else if (mac[i] >= 'a' && mac[i] <= 'f')
			digit = mac[i] - 'a' + 10;
		else
			continue;
		/* two nibbles per output byte; never write past the buffer */
		if (j < ETHER_ADDR_LEN * 2) {
			if ((j & 1) == 0)
				digit <<= 4;
			lladdr[j++ / 2] |= digit;
		}
	}

	return (i);
}
367 
/*
 * Read the firmware's string-specs area out of the BAR0 window and
 * parse its NUL-separated "KEY=value" strings: "MAC=" yields the
 * station address (into sc_ac.ac_enaddr), "PC=" the part number
 * (copied into part, at most partlen bytes).  Returns 0 on success,
 * 1 if the advertised header lies outside the mapped window.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t	offset;
	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
	u_int		i, len, maxlen;

	/* the header's own offset is stored big-endian at MYX_HEADER_POS */
	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	/* clamp to our stack buffer */
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	for (i = 0; i < len; i++) {
		maxlen = len - i;
		/* an empty string terminates the list */
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* skip to this string's NUL; the outer i++ steps over it */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}
409 
/*
 * Load the named firmware image from the filesystem, validate it
 * (overall size, header offset, ethernet type, version prefix), copy
 * it into the chip's SRAM in 256-byte chunks and boot it.
 * Returns 0 on success, 1 on any failure (error already printed).
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* the image stores the offset of its own header at MYX_HEADER_POS */
	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	/* only ethernet firmware with a matching version prefix will do */
	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF, fwlen);
	return (ret);
}
464 
/*
 * Deferred attach, run once the root filesystem is mounted: allocate
 * the command buffer, load the aligned firmware, reset the chip,
 * switch to the unaligned firmware if the DMA probe says so,
 * establish the interrupt and attach the network interface.
 */
void
myx_attachhook(struct device *self)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/*
	 * tx segments must not cross this boundary; myx_probe_firmware()
	 * shrinks it to 2048 when the unaligned firmware is used.
	 */
	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_qstart = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = MYX_RXBIG_SIZE;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* real sendq length is set in myx_up() once the ring size is known */
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	/* fixed media: the chip reports only link up/down */
	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}
536 
/*
 * Decide whether the already-loaded aligned firmware can stay.  On
 * PCIe links of width x4 or less it always can.  On wider links, run
 * the firmware's unaligned DMA read, write and read/write tests
 * against a 4k scratch buffer; if any test fails, fall back to the
 * unaligned firmware and shrink the tx boundary to 2048.  Returns 0
 * once a usable firmware is running, 1 on hard failure.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* negotiated link width lives in LCSR bits 25:20 */
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * NOTE(review): mc_data2 appears to encode the 4096-byte length
	 * with a mode code in the upper 16 bits (0x1000x) — confirm
	 * against the Myri-10G firmware interface documentation.
	 */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	/* aligned DMA is unsafe here: switch to the unaligned firmware */
	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}
617 
618 void
619 myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
620 {
621 	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
622 	    BUS_SPACE_BARRIER_READ);
623 	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
624 }
625 
626 void
627 myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
628 {
629 	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
630 	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
631 	    BUS_SPACE_BARRIER_WRITE);
632 }
633 
/*
 * Create, allocate, map and load a single-segment DMA buffer of the
 * given size and alignment.  On failure the goto ladder unwinds only
 * the steps that already succeeded.  Returns 0 on success, 1 on error.
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
664 
665 void
666 myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
667 {
668 	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
669 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
670 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
671 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
672 }
673 
/*
 * Issue a command through the MYX_CMD window.  The chip DMAs its
 * response into the command buffer, which is pre-armed with the
 * 0xffffffff sentinel; we poll for up to 20 x 1ms waiting for the
 * result word to change.  On success (result 0) the response data is
 * stored in *r if r is non-NULL and 0 is returned; chip errors and
 * timeouts (sentinel still in place) both return -1.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	struct myx_response	*mr;
	u_int			 i;
	u_int32_t		 result, data;
#ifdef MYX_DEBUG
	/* names indexed by command code, debug printing only */
	static const char *cmds[MYXCMD_MAX] = {
		"CMD_NONE",
		"CMD_RESET",
		"CMD_GET_VERSION",
		"CMD_SET_INTRQDMA",
		"CMD_SET_BIGBUFSZ",
		"CMD_SET_SMALLBUFSZ",
		"CMD_GET_TXRINGOFF",
		"CMD_GET_RXSMALLRINGOFF",
		"CMD_GET_RXBIGRINGOFF",
		"CMD_GET_INTRACKOFF",
		"CMD_GET_INTRDEASSERTOFF",
		"CMD_GET_TXRINGSZ",
		"CMD_GET_RXRINGSZ",
		"CMD_SET_INTRQSZ",
		"CMD_SET_IFUP",
		"CMD_SET_IFDOWN",
		"CMD_SET_MTU",
		"CMD_GET_INTRCOALDELAYOFF",
		"CMD_SET_STATSINTVL",
		"CMD_SET_STATSDMA_OLD",
		"CMD_SET_PROMISC",
		"CMD_UNSET_PROMISC",
		"CMD_SET_LLADDR",
		"CMD_SET_FC",
		"CMD_UNSET_FC",
		"CMD_DMA_TEST",
		"CMD_SET_ALLMULTI",
		"CMD_UNSET_ALLMULTI",
		"CMD_SET_MCASTGROUP",
		"CMD_UNSET_MCASTGROUP",
		"CMD_UNSET_MCAST",
		"CMD_SET_STATSDMA",
		"CMD_UNALIGNED_DMA_TEST",
		"CMD_GET_UNALIGNED_STATUS"
	};
#endif

	/* tell the chip where to DMA the response */
	mc->mc_cmd = htobe32(cmd);
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* arm the response buffer with the "not done yet" sentinel */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmds[cmd], i, result, data, data);

	if (result != 0)
		return (-1);

	if (r != NULL)
		*r = data;
	return (0);
}
757 
/*
 * Boot the image just written to SRAM: ask the bootloader to copy
 * length - 8 bytes (starting at image offset 8) to MYX_FW_BOOT and
 * jump to 0.  Completion is signalled by the chip DMAing 0xffffffff
 * into the command buffer — that pattern is the same in either byte
 * order, so no swap is needed.  Polls for up to 200 x 1ms.
 * Returns 0 on success, 1 on timeout.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	/* skip the 8-byte image prologue — presumably handshake data */
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	/* zero the completion word before arming the readback */
	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}
801 
/*
 * Enable or disable the dummy RDMA mechanism, polling (20 x 1ms) for
 * the chip to DMA the 0xffffffff completion pattern back into the
 * command buffer.  Returns 0 on success, 1 on timeout.
 *
 * NOTE(review): unlike myx_boot(), rc is never memset; this assumes
 * every field of struct myx_rdmacmd is assigned below — verify
 * against if_myxreg.h that no field (or padding) is left stale.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd	 rc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
	u_int32_t		*status;
	int			 ret = 1;
	u_int			 i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	/* zero the completion word before arming the readback */
	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/* endian-invariant pattern: no byte swap needed */
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}
852 
/*
 * ifmedia change callback.  The media set contains only
 * IFM_ETHER|IFM_AUTO, so there is nothing to reconfigure.
 */
int
myx_media_change(struct ifnet *ifp)
{
	return (0);
}
859 
/*
 * ifmedia status callback: read the link state word out of the status
 * block the chip DMAs into host memory, fold it into the ifnet link
 * state, and report 10G full duplex with tx/rx pause while up.
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	/* the status block only exists while the interface is running */
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	/* snapshot the chip-written link state under proper syncs */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sts = sc->sc_sts->ms_linkstate;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}
889 
/*
 * Translate the chip's (big-endian) link status word into the ifnet
 * link state; on a change, notify the stack and update the baudrate
 * (10Gb/s while up, 0 otherwise).
 */
void
myx_link_state(struct myx_softc *sc, u_int32_t sts)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	int			 link_state = LINK_STATE_DOWN;

	if (betoh32(sts) == MYXSTS_LINKUP)
		link_state = LINK_STATE_FULL_DUPLEX;
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
		    IF_Gbps(10) : 0;
	}
}
905 
/*
 * Watchdog hook: intentionally a no-op for this driver.
 */
void
myx_watchdog(struct ifnet *ifp)
{
}
911 
/*
 * Standard interface ioctl handler: bring the interface up or down,
 * delegate media to ifmedia, report rx ring fill levels for
 * SIOCGIFRXR, and fall through to ether_ioctl() for the rest.  Any
 * path returning ENETRESET is resolved by reprogramming the rx filter
 * via myx_iff() while running.
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
961 
962 int
963 myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
964 {
965 	struct if_rxring_info ifr[2];
966 
967 	memset(ifr, 0, sizeof(ifr));
968 
969 	ifr[0].ifr_size = MYX_RXSMALL_SIZE;
970 	ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr;
971 
972 	ifr[1].ifr_size = MYX_RXBIG_SIZE;
973 	ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr;
974 
975 	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
976 }
977 
978 void
979 myx_up(struct myx_softc *sc)
980 {
981 	struct ifnet		*ifp = &sc->sc_ac.ac_if;
982 	struct myx_cmd		mc;
983 	bus_dmamap_t		map;
984 	size_t			size;
985 	u_int			maxpkt;
986 	u_int32_t		r;
987 
988 	memset(&mc, 0, sizeof(mc));
989 	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
990 		printf("%s: failed to reset the device\n", DEVNAME(sc));
991 		return;
992 	}
993 
994 	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
995 	    64, MYXALIGN_CMD) != 0) {
996 		printf("%s: failed to allocate zero pad memory\n",
997 		    DEVNAME(sc));
998 		return;
999 	}
1000 	memset(sc->sc_zerodma.mxm_kva, 0, 64);
1001 	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1002 	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1003 
1004 	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
1005 	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
1006 		printf("%s: failed to allocate pad DMA memory\n",
1007 		    DEVNAME(sc));
1008 		goto free_zero;
1009 	}
1010 	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1011 	    sc->sc_paddma.mxm_map->dm_mapsize,
1012 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1013 
1014 	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
1015 		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
1016 		goto free_pad;
1017 	}
1018 
1019 	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
1020 		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
1021 		goto free_pad;
1022 	}
1023 	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
1024 
1025 	memset(&mc, 0, sizeof(mc));
1026 	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
1027 		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
1028 		goto free_pad;
1029 	}
1030 	sc->sc_tx_ring_prod = 0;
1031 	sc->sc_tx_ring_cons = 0;
1032 	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1033 	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1034 	sc->sc_tx_count = 0;
1035 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1036 
1037 	/* Allocate Interrupt Queue */
1038 
1039 	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1040 	sc->sc_intrq_idx = 0;
1041 
1042 	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1043 	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1044 	    size, MYXALIGN_DATA) != 0) {
1045 		goto free_pad;
1046 	}
1047 	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1048 	map = sc->sc_intrq_dma.mxm_map;
1049 	memset(sc->sc_intrq, 0, size);
1050 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1051 	    BUS_DMASYNC_PREREAD);
1052 
1053 	memset(&mc, 0, sizeof(mc));
1054 	mc.mc_data0 = htobe32(size);
1055 	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1056 		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1057 		goto free_intrq;
1058 	}
1059 
1060 	memset(&mc, 0, sizeof(mc));
1061 	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1062 	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1063 	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1064 		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1065 		goto free_intrq;
1066 	}
1067 
1068 	/*
1069 	 * get interrupt offsets
1070 	 */
1071 
1072 	memset(&mc, 0, sizeof(mc));
1073 	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1074 	    &sc->sc_irqclaimoff) != 0) {
1075 		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1076 		goto free_intrq;
1077 	}
1078 
1079 	memset(&mc, 0, sizeof(mc));
1080 	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1081 	    &sc->sc_irqdeassertoff) != 0) {
1082 		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1083 		goto free_intrq;
1084 	}
1085 
1086 	memset(&mc, 0, sizeof(mc));
1087 	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1088 	    &sc->sc_irqcoaloff) != 0) {
1089 		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1090 		goto free_intrq;
1091 	}
1092 
1093 	/* Set an appropriate interrupt coalescing period */
1094 	r = htobe32(MYX_IRQCOALDELAY);
1095 	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1096 
1097 	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1098 		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1099 		goto free_intrq;
1100 	}
1101 
1102 	memset(&mc, 0, sizeof(mc));
1103 	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1104 		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1105 		goto free_intrq;
1106 	}
1107 
1108 	memset(&mc, 0, sizeof(mc));
1109 	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1110 		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1111 		goto free_intrq;
1112 	}
1113 
1114 	memset(&mc, 0, sizeof(mc));
1115 	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1116 	    &sc->sc_tx_ring_offset) != 0) {
1117 		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1118 		goto free_intrq;
1119 	}
1120 
1121 	memset(&mc, 0, sizeof(mc));
1122 	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1123 	    &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) {
1124 		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1125 		goto free_intrq;
1126 	}
1127 
1128 	memset(&mc, 0, sizeof(mc));
1129 	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1130 	    &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) {
1131 		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1132 		goto free_intrq;
1133 	}
1134 
1135 	/* Allocate Interrupt Data */
1136 	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1137 	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1138 		printf("%s: failed to allocate status DMA memory\n",
1139 		    DEVNAME(sc));
1140 		goto free_intrq;
1141 	}
1142 	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1143 	map = sc->sc_sts_dma.mxm_map;
1144 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1145 	    BUS_DMASYNC_PREREAD);
1146 
1147 	memset(&mc, 0, sizeof(mc));
1148 	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1149 	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1150 	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1151 	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1152 		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1153 		goto free_sts;
1154 	}
1155 
1156 	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1157 
1158 	memset(&mc, 0, sizeof(mc));
1159 	mc.mc_data0 = htobe32(maxpkt);
1160 	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1161 		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1162 		goto free_sts;
1163 	}
1164 
1165 	if (myx_tx_init(sc, maxpkt) != 0)
1166 		goto free_sts;
1167 
1168 	if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0)
1169 		goto free_tx_ring;
1170 
1171 	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXSMALL]) != 0)
1172 		goto free_rx_ring_small;
1173 
1174 	if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0)
1175 		goto empty_rx_ring_small;
1176 
1177 	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXBIG]) != 0)
1178 		goto free_rx_ring_big;
1179 
1180 	memset(&mc, 0, sizeof(mc));
1181 	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
1182 	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1183 		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1184 		goto empty_rx_ring_big;
1185 	}
1186 
1187 	memset(&mc, 0, sizeof(mc));
1188 	mc.mc_data0 = htobe32(16384);
1189 	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1190 		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1191 		goto empty_rx_ring_big;
1192 	}
1193 
1194 	sc->sc_state = MYX_S_RUNNING;
1195 
1196 	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1197 		printf("%s: failed to start the device\n", DEVNAME(sc));
1198 		goto empty_rx_ring_big;
1199 	}
1200 
1201 	myx_iff(sc);
1202 	SET(ifp->if_flags, IFF_RUNNING);
1203 	ifq_restart(&ifp->if_snd);
1204 
1205 	return;
1206 
1207 empty_rx_ring_big:
1208 	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1209 free_rx_ring_big:
1210 	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1211 empty_rx_ring_small:
1212 	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1213 free_rx_ring_small:
1214 	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1215 free_tx_ring:
1216 	myx_tx_free(sc);
1217 free_sts:
1218 	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1219 	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1220 	myx_dmamem_free(sc, &sc->sc_sts_dma);
1221 free_intrq:
1222 	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1223 	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1224 	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1225 free_pad:
1226 	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1227 	    sc->sc_paddma.mxm_map->dm_mapsize,
1228 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1229 	myx_dmamem_free(sc, &sc->sc_paddma);
1230 
1231 	memset(&mc, 0, sizeof(mc));
1232 	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1233 		printf("%s: failed to reset the device\n", DEVNAME(sc));
1234 	}
1235 free_zero:
1236 	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1237 	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1238 	myx_dmamem_free(sc, &sc->sc_zerodma);
1239 }
1240 
1241 int
1242 myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1243 {
1244 	struct myx_cmd		 mc;
1245 
1246 	memset(&mc, 0, sizeof(mc));
1247 	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1248 	    addr[2] << 8 | addr[3]);
1249 	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1250 
1251 	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1252 		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1253 		return (-1);
1254 	}
1255 	return (0);
1256 }
1257 
1258 void
1259 myx_iff(struct myx_softc *sc)
1260 {
1261 	struct myx_cmd		mc;
1262 	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1263 	struct ether_multi	*enm;
1264 	struct ether_multistep	step;
1265 	u_int8_t *addr;
1266 
1267 	CLR(ifp->if_flags, IFF_ALLMULTI);
1268 
1269 	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1270 	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1271 		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1272 		return;
1273 	}
1274 
1275 	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1276 		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1277 		return;
1278 	}
1279 
1280 	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1281 		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1282 		return;
1283 	}
1284 
1285 	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1286 	    sc->sc_ac.ac_multirangecnt > 0) {
1287 		SET(ifp->if_flags, IFF_ALLMULTI);
1288 		return;
1289 	}
1290 
1291 	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1292 	while (enm != NULL) {
1293 		addr = enm->enm_addrlo;
1294 
1295 		memset(&mc, 0, sizeof(mc));
1296 		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1297 		    addr[2] << 8 | addr[3]);
1298 		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1299 		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1300 			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1301 			return;
1302 		}
1303 
1304 		ETHER_NEXT_MULTI(step, enm);
1305 	}
1306 
1307 	memset(&mc, 0, sizeof(mc));
1308 	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1309 		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1310 		return;
1311 	}
1312 }
1313 
/*
 * Bring the interface down and release all ring resources.
 *
 * The shutdown is a handshake with myx_intr(): we record the current
 * link-down counter, move sc_state to MYX_S_DOWN, and issue IFDOWN.
 * myx_intr() detects the counter changing while in MYX_S_DOWN, moves
 * the state to MYX_S_OFF and wakes us (see the statusupdated branch
 * in myx_intr).  Only then is it safe to tear the rings down.
 */
void
myx_down(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	struct sleep_state	 sls;
	struct myx_cmd		 mc;
	int			 s;
	int			 ring;

	CLR(ifp->if_flags, IFF_RUNNING);

	/* snapshot the firmware link-down counter for myx_intr to compare */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sc->sc_linkdown = sts->ms_linkdown;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* publish the state change before the command can take effect */
	sc->sc_state = MYX_S_DOWN;
	membar_producer();

	memset(&mc, 0, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	/* wait for myx_intr to acknowledge the shutdown via MYX_S_OFF */
	while (sc->sc_state != MYX_S_OFF) {
		sleep_setup(&sls, sts, PWAIT, "myxdown");
		membar_consumer();
		sleep_finish(&sls, sc->sc_state != MYX_S_OFF);
	}

	s = splnet();
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	/* make sure no transmit is still in flight before freeing rings */
	ifq_clr_oactive(&ifp->if_snd);
	ifq_barrier(&ifp->if_snd);

	for (ring = 0; ring < 2; ring++) {
		struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];

		timeout_del(&mrr->mrr_refill);
		myx_rx_empty(sc, mrr);
		myx_rx_free(sc, mrr);
	}

	myx_tx_empty(sc);
	myx_tx_free(sc);

	/* the sleep shizz above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1387 
/*
 * Write the tail descriptors for a transmit slot: one descriptor per
 * DMA segment after the first (the first descriptor is posted
 * separately by myx_start), plus a padding descriptor pointing at the
 * zero buffer for frames shorter than 60 bytes.
 */
void
myx_write_txd_tail(struct myx_softc *sc, struct myx_slot *ms, u_int8_t flags,
    u_int32_t offset, u_int idx)
{
	struct myx_tx_desc		txd;
	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
	bus_dmamap_t			map = ms->ms_map;
	int				i;

	/* segments 1..n-1; descriptor index wraps around the ring */
	for (i = 1; i < map->dm_nsegs; i++) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
		txd.tx_flags = flags;

		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}

	/* pad runt frames */
	if (map->dm_mapsize < 60) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(60 - map->dm_mapsize);
		txd.tx_flags = flags;

		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}
}
1420 
/*
 * Transmit start routine.  Works in two passes: first dequeue mbufs
 * and DMA-load them into slots, then write the descriptors to the
 * NIC.  The descriptor for the very first packet is posted last
 * (and its final word last of all) so the firmware does not start
 * processing before all descriptors are visible.
 */
void
myx_start(struct ifqueue *ifq)
{
	struct ifnet			*ifp = ifq->ifq_if;
	struct myx_tx_desc		txd;
	struct myx_softc		*sc = ifp->if_softc;
	struct myx_slot			*ms;
	bus_dmamap_t			map;
	struct mbuf			*m;
	u_int32_t			offset = sc->sc_tx_ring_offset;
	u_int				idx, cons, prod;
	u_int				free, used;
	u_int8_t			flags;

	idx = sc->sc_tx_ring_prod;

	/* figure out space */
	free = sc->sc_tx_ring_cons;
	if (free <= idx)
		free += sc->sc_tx_ring_count;
	free -= idx;

	cons = prod = sc->sc_tx_prod;

	used = 0;

	/* pass 1: dequeue and DMA-load as many packets as fit */
	for (;;) {
		/* worst case: sc_tx_nsegs segments plus a pad descriptor */
		if (used + sc->sc_tx_nsegs + 1 > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		ms = &sc->sc_tx_slots[prod];

		if (myx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* runt frames consume one extra (padding) descriptor */
		used += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		if (++prod >= sc->sc_tx_ring_count)
			prod = 0;
	}

	/* nothing was queued */
	if (cons == prod)
		return;

	ms = &sc->sc_tx_slots[cons];

	/* pass 2: write descriptors for every packet except the first */
	for (;;) {
		idx += ms->ms_map->dm_nsegs +
		    (ms->ms_map->dm_mapsize < 60 ? 1 : 0);
		if (idx >= sc->sc_tx_ring_count)
			idx -= sc->sc_tx_ring_count;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;

		if (cons == prod)
			break;

		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, ms, flags, offset, idx);
	}

	/* go back and post first packet */
	ms = &sc->sc_tx_slots[sc->sc_tx_prod];
	map = ms->ms_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	memset(&txd, 0, sizeof(txd));
	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, ms, flags, offset, sc->sc_tx_ring_prod);

	/* write all of the first descriptor except its last word... */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* ...then the last word, which hands the ring to the firmware */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * (sc->sc_tx_ring_prod + 1) -
	    sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);

	/* commit */
	sc->sc_tx_ring_prod = idx;
	sc->sc_tx_prod = prod;
}
1553 
/*
 * DMA-load an mbuf chain into a transmit slot, defragmenting once if
 * the chain has more segments than the map allows.  Returns 0 on
 * success; on failure the caller owns (and frees) the mbuf.
 */
int
myx_load_mbuf(struct myx_softc *sc, struct myx_slot *ms, struct mbuf *m)
{
	bus_dma_tag_t			dmat = sc->sc_dmat;
	bus_dmamap_t			dmap = ms->ms_map;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(dmat, dmap, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return (1);
	}

	ms->ms_m = m;
	return (0);
}
1577 
/*
 * Interrupt handler.  The firmware DMAs a myx_status block into host
 * memory; ms_isvalid tells us whether this interrupt is ours.  We
 * re-read the status until it stops changing, then process TX
 * completions, RX, and link/shutdown state updates.
 */
int
myx_intr(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state		 state;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 data;
	u_int8_t		 valid = 0;

	state = sc->sc_state;
	if (state == MYX_S_OFF)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/* not our interrupt */
	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		return (0);
	}

	/* legacy INTx needs an explicit deassert write */
	if (sc->sc_intx) {
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/* re-sync until the firmware stops updating the status block */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	/* claim the interrupt; NOTE(review): 3 appears to be a magic
	 * claim value expected by the firmware — confirm against docs */
	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	if (sts->ms_statusupdated) {
		/* a linkdown count change while going down means the
		 * IFDOWN completed: hand MYX_S_OFF to myx_down() */
		if (state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown) {
			sc->sc_state = MYX_S_OFF;
			membar_producer();
			wakeup(sts);
		} else {
			data = sts->ms_linkstate;
			if (data != 0xffffffff) {
				KERNEL_LOCK();
				myx_link_state(sc, data);
				KERNEL_UNLOCK();
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (1);
}
1652 
1653 void
1654 myx_refill(void *xmrr)
1655 {
1656 	struct myx_rx_ring *mrr = xmrr;
1657 	struct myx_softc *sc = mrr->mrr_softc;
1658 
1659 	myx_rx_fill(sc, mrr);
1660 
1661 	if (mrr->mrr_prod == mrr->mrr_cons)
1662 		timeout_add(&mrr->mrr_refill, 1);
1663 }
1664 
/*
 * Reap completed transmit slots.  done_count is the firmware's total
 * completion counter; the loop advances sc_tx_count until it matches,
 * freeing one slot per iteration.
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_slot *ms;
	bus_dmamap_t map;
	u_int idx, cons;

	idx = sc->sc_tx_ring_cons;
	cons = sc->sc_tx_cons;

	do {
		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		/* each slot used one descriptor per segment plus a
		 * padding descriptor for runt frames */
		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(ms->ms_m);

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;
	} while (++sc->sc_tx_count != done_count);

	if (idx >= sc->sc_tx_ring_count)
		idx -= sc->sc_tx_ring_count;

	sc->sc_tx_ring_cons = idx;
	sc->sc_tx_cons = cons;

	/* descriptors were freed; let blocked transmits continue */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
1700 
/*
 * Receive completion.  The firmware posts received-frame lengths into
 * the interrupt queue; a zero length terminates the batch.  The frame
 * length tells us which ring (small or big) the buffer came from.
 */
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_rx_ring *mrr;
	struct myx_slot *ms;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		/* clear the entry so the next pass sees it as empty */
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* frame size determines which rx ring was used */
		ring = (len <= (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mrr = &sc->sc_rx_ring[ring];
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

		m = ms->ms_m;
		/* skip the alignment pad so the payload is aligned */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* replenish the rings we consumed buffers from */
	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mrr = &sc->sc_rx_ring[ring];

		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
		myx_rx_fill(sc, mrr);
		/* ring is empty: fall back to the refill timeout */
		if (mrr->mrr_prod == mrr->mrr_cons)
			timeout_add(&mrr->mrr_refill, 0);
	}

	if_input(ifp, &ml);
}
1762 
/*
 * Fill up to `slots` rx ring entries with fresh buffers and post the
 * descriptors to the NIC.  The descriptor for the first filled slot
 * is written last, behind a write barrier, so the firmware does not
 * consume a partially posted batch.  Returns the number of slots that
 * could NOT be filled.
 */
static int
myx_rx_fill_slots(struct myx_softc *sc, struct myx_rx_ring *mrr, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	u_int p, first, fills;

	/* fill the first slot, but defer posting its descriptor */
	first = p = mrr->mrr_prod;
	if (myx_buf_fill(sc, &mrr->mrr_slots[first], mrr->mrr_mclget) != 0)
		return (slots);

	if (++p >= sc->sc_rx_ring_count)
		p = 0;

	for (fills = 1; fills < slots; fills++) {
		ms = &mrr->mrr_slots[p];

		if (myx_buf_fill(sc, ms, mrr->mrr_mclget) != 0)
			break;

		rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc, offset + p * sizeof(rxd),
		    &rxd, sizeof(rxd));

		if (++p >= sc->sc_rx_ring_count)
			p = 0;
	}

	mrr->mrr_prod = p;

	/* make sure the first descriptor is seen after the others */
	if (fills > 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	ms = &mrr->mrr_slots[first];
	rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
	myx_bus_space_write(sc, offset + first * sizeof(rxd),
	    &rxd, sizeof(rxd));

	return (slots - fills);
}
1808 
/*
 * Allocate the slot array and DMA maps for one rx ring and write an
 * all-ones descriptor to every ring entry (presumably marking them
 * as not-yet-filled to the firmware — TODO confirm against the
 * firmware interface spec).  Returns 0 or an errno.
 */
int
myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
{
	struct myx_rx_desc rxd;
	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	int rv;
	int i;

	mrr->mrr_slots = mallocarray(sizeof(*ms), sc->sc_rx_ring_count,
	    M_DEVBUF, M_WAITOK);
	if (mrr->mrr_slots == NULL)
		return (ENOMEM);

	memset(&rxd, 0xff, sizeof(rxd));
	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		ms = &mrr->mrr_slots[i];
		/* single-segment maps: rx buffers are contiguous clusters */
		rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
		if (rv != 0)
			goto destroy;

		myx_bus_space_write(sc, offset + i * sizeof(rxd),
		    &rxd, sizeof(rxd));
	}

	/* keep two slots in reserve for the rxr accounting */
	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
	mrr->mrr_prod = mrr->mrr_cons = 0;

	return (0);

destroy:
	/* undo the maps created before the failure */
	while (i-- > 0) {
		ms = &mrr->mrr_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}
	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
	return (rv);
}
1849 
1850 int
1851 myx_rx_fill(struct myx_softc *sc, struct myx_rx_ring *mrr)
1852 {
1853 	u_int slots;
1854 
1855 	slots = if_rxr_get(&mrr->mrr_rxr, sc->sc_rx_ring_count);
1856 	if (slots == 0)
1857 		return (1);
1858 
1859 	slots = myx_rx_fill_slots(sc, mrr, slots);
1860 	if (slots > 0)
1861 		if_rxr_put(&mrr->mrr_rxr, slots);
1862 
1863 	return (0);
1864 }
1865 
1866 void
1867 myx_rx_empty(struct myx_softc *sc, struct myx_rx_ring *mrr)
1868 {
1869 	struct myx_slot *ms;
1870 
1871 	while (mrr->mrr_cons != mrr->mrr_prod) {
1872 		ms = &mrr->mrr_slots[mrr->mrr_cons];
1873 
1874 		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
1875 			mrr->mrr_cons = 0;
1876 
1877 		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1878 		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1879 		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
1880 		m_freem(ms->ms_m);
1881 	}
1882 
1883 	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1884 }
1885 
1886 void
1887 myx_rx_free(struct myx_softc *sc, struct myx_rx_ring *mrr)
1888 {
1889 	struct myx_slot *ms;
1890 	int i;
1891 
1892 	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1893 		ms = &mrr->mrr_slots[i];
1894 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1895 	}
1896 
1897 	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1898 }
1899 
1900 struct mbuf *
1901 myx_mcl_small(void)
1902 {
1903 	struct mbuf *m;
1904 
1905 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MYX_RXSMALL_SIZE);
1906 	if (m == NULL)
1907 		return (NULL);
1908 
1909 	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;
1910 
1911 	return (m);
1912 }
1913 
1914 struct mbuf *
1915 myx_mcl_big(void)
1916 {
1917 	struct mbuf *m;
1918 	void *mcl;
1919 
1920 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1921 	if (m == NULL)
1922 		return (NULL);
1923 
1924 	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
1925 	if (mcl == NULL) {
1926 		m_free(m);
1927 		return (NULL);
1928 	}
1929 
1930 	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, MEXTFREE_POOL, myx_mcl_pool);
1931 	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;
1932 
1933 	return (m);
1934 }
1935 
1936 int
1937 myx_buf_fill(struct myx_softc *sc, struct myx_slot *ms,
1938     struct mbuf *(*mclget)(void))
1939 {
1940 	struct mbuf *m;
1941 	int rv;
1942 
1943 	m = (*mclget)();
1944 	if (m == NULL)
1945 		return (ENOMEM);
1946 
1947 	rv = bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m, BUS_DMA_NOWAIT);
1948 	if (rv != 0) {
1949 		m_freem(m);
1950 		return (rv);
1951 	}
1952 
1953 	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1954 	    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1955 
1956 	ms->ms_m = m;
1957 
1958 	return (0);
1959 }
1960 
1961 int
1962 myx_tx_init(struct myx_softc *sc, bus_size_t size)
1963 {
1964 	struct myx_slot *ms;
1965 	int rv;
1966 	int i;
1967 
1968 	sc->sc_tx_slots = mallocarray(sizeof(*ms), sc->sc_tx_ring_count,
1969 	    M_DEVBUF, M_WAITOK);
1970 	if (sc->sc_tx_slots == NULL)
1971 		return (ENOMEM);
1972 
1973 	for (i = 0; i < sc->sc_tx_ring_count; i++) {
1974 		ms = &sc->sc_tx_slots[i];
1975 		rv = bus_dmamap_create(sc->sc_dmat, size, sc->sc_tx_nsegs,
1976 		    sc->sc_tx_boundary, sc->sc_tx_boundary,
1977 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
1978 		if (rv != 0)
1979 			goto destroy;
1980 	}
1981 
1982 	sc->sc_tx_prod = sc->sc_tx_cons = 0;
1983 
1984 	return (0);
1985 
1986 destroy:
1987 	while (i-- > 0) {
1988 		ms = &sc->sc_tx_slots[i];
1989 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1990 	}
1991 	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
1992 	return (rv);
1993 }
1994 
1995 void
1996 myx_tx_empty(struct myx_softc *sc)
1997 {
1998 	struct myx_slot *ms;
1999 	u_int cons = sc->sc_tx_cons;
2000 	u_int prod = sc->sc_tx_prod;
2001 
2002 	while (cons != prod) {
2003 		ms = &sc->sc_tx_slots[cons];
2004 
2005 		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
2006 		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2007 		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
2008 		m_freem(ms->ms_m);
2009 
2010 		if (++cons >= sc->sc_tx_ring_count)
2011 			cons = 0;
2012 	}
2013 
2014 	sc->sc_tx_cons = cons;
2015 }
2016 
2017 void
2018 myx_tx_free(struct myx_softc *sc)
2019 {
2020 	struct myx_slot *ms;
2021 	int i;
2022 
2023 	for (i = 0; i < sc->sc_tx_ring_count; i++) {
2024 		ms = &sc->sc_tx_slots[i];
2025 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
2026 	}
2027 
2028 	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
2029 }
2030