xref: /openbsd-src/sys/dev/pci/if_myx.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /*	$OpenBSD: if_myx.c,v 1.96 2016/09/15 02:00:17 dlg Exp $	*/
2 
3 /*
4  * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21  */
22 
23 #include "bpfilter.h"
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/sockio.h>
28 #include <sys/mbuf.h>
29 #include <sys/kernel.h>
30 #include <sys/socket.h>
31 #include <sys/malloc.h>
32 #include <sys/pool.h>
33 #include <sys/timeout.h>
34 #include <sys/device.h>
35 #include <sys/proc.h>
36 #include <sys/queue.h>
37 
38 #include <machine/bus.h>
39 #include <machine/intr.h>
40 
41 #include <net/if.h>
42 #include <net/if_dl.h>
43 #include <net/if_media.h>
44 
45 #if NBPFILTER > 0
46 #include <net/bpf.h>
47 #endif
48 
49 #include <netinet/in.h>
50 #include <netinet/if_ether.h>
51 
52 #include <dev/pci/pcireg.h>
53 #include <dev/pci/pcivar.h>
54 #include <dev/pci/pcidevs.h>
55 
56 #include <dev/pci/if_myxreg.h>
57 
#ifdef MYX_DEBUG
/*
 * Debug message categories. These are a bitmask tested by DPRINTF(),
 * so each level must be a distinct bit: the original values (2<<0) and
 * (3<<0) evaluated to 2 and 3, making MYXDBG_INTR overlap both other
 * levels. With the default myx_debug = MYXDBG_ALL this made no runtime
 * difference, but selective masking was broken.
 */
#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
#define MYXDBG_CMD	(1<<1)	/* commands */
#define MYXDBG_INTR	(1<<2)	/* interrupts */
#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
int myx_debug = MYXDBG_ALL;
#define DPRINTF(_lvl, _arg...)	do {					\
	if (myx_debug & (_lvl))						\
		printf(_arg);						\
} while (0)
#else
#define DPRINTF(_lvl, arg...)
#endif
71 
72 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
73 
/*
 * A single contiguous DMA allocation: the bus map, its (single) backing
 * segment, and the kernel mapping of that memory.  Managed by
 * myx_dmamem_alloc()/myx_dmamem_free().
 */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;	/* loaded bus dma map */
	bus_dma_segment_t	 mxm_seg;	/* single backing segment */
	int			 mxm_nsegs;	/* segment count (always 1) */
	size_t			 mxm_size;	/* size of the allocation */
	caddr_t			 mxm_kva;	/* kernel mapping of mxm_seg */
};
81 
/* Shared pool of 9k rx clusters, created by the first myx_attach(). */
struct pool *myx_mcl_pool;
83 
/* One tx or rx ring slot: the loaded dma map and the mbuf it covers. */
struct myx_slot {
	bus_dmamap_t		 ms_map;
	struct mbuf		*ms_m;
};
88 
/*
 * Per-ring rx state; the hardware has two rx rings (small and big
 * clusters, see MYX_RXSMALL/MYX_RXBIG).
 */
struct myx_rx_ring {
	struct myx_softc	*mrr_softc;	/* backpointer for timeouts */
	struct timeout		 mrr_refill;	/* deferred refill via myx_refill() */
	struct if_rxring	 mrr_rxr;	/* rx ring accounting */
	struct myx_slot		*mrr_slots;	/* one slot per descriptor */
	u_int32_t		 mrr_offset;	/* ring offset inside the NIC */
	u_int			 mrr_running;	/* NOTE(review): appears to gate refill while up — confirm at use sites */
	u_int			 mrr_prod;	/* producer index */
	u_int			 mrr_cons;	/* consumer index */
	struct mbuf		*(*mrr_mclget)(void); /* cluster allocator for this ring */
};
100 
/* Interface lifecycle state, used by up/down paths. */
enum myx_state {
	MYX_S_OFF = 0,		/* not brought up yet */
	MYX_S_RUNNING,		/* interface is up */
	MYX_S_DOWN		/* shutdown in progress */
};
106 
/* Per-device softc. */
struct myx_softc {
	struct device		 sc_dev;	/* must be first: autoconf glue */
	struct arpcom		 sc_ac;		/* ethernet state, incl. ifnet */

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;	/* BAR0 register window */
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;	/* size of mapped window, 0 if unmapped */

	struct myx_dmamem	 sc_zerodma;	/* 64 bytes of zeros (tx pad) */
	struct myx_dmamem	 sc_cmddma;	/* command/response buffer */
	struct myx_dmamem	 sc_paddma;	/* dummy RDMA target */

	struct myx_dmamem	 sc_sts_dma;	/* device status block */
	volatile struct myx_status	*sc_sts;

	int			 sc_intx;	/* 1 if using INTx, 0 if MSI */
	void			*sc_irqh;
	u_int32_t		 sc_irqcoaloff;		/* NIC offsets obtained */
	u_int32_t		 sc_irqclaimoff;	/* via MYXCMD_GET_*OFF */
	u_int32_t		 sc_irqdeassertoff;

	struct myx_dmamem	 sc_intrq_dma;	/* interrupt (event) queue */
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	u_int			 sc_rx_ring_count;
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct myx_rx_ring	 sc_rx_ring[2];

	/* tx segment alignment boundary: 4096, or 2048 for unaligned fw */
	bus_size_t		 sc_tx_boundary;
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_ring_prod;
	u_int			 sc_tx_ring_cons;

	u_int			 sc_tx_prod;
	u_int			 sc_tx_cons;
	struct myx_slot		*sc_tx_slots;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;
	volatile u_int8_t	 sc_linkdown;
};
160 
161 #define MYX_RXSMALL_SIZE	MCLBYTES
162 #define MYX_RXBIG_SIZE		(9 * 1024)
163 
164 int	 myx_match(struct device *, void *, void *);
165 void	 myx_attach(struct device *, struct device *, void *);
166 int	 myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
167 int	 myx_query(struct myx_softc *sc, char *, size_t);
168 u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
169 void	 myx_attachhook(struct device *);
170 int	 myx_loadfirmware(struct myx_softc *, const char *);
171 int	 myx_probe_firmware(struct myx_softc *);
172 
173 void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
174 void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
175 
176 #if defined(__LP64__)
177 #define _myx_bus_space_write bus_space_write_raw_region_8
178 typedef u_int64_t myx_bus_t;
179 #else
180 #define _myx_bus_space_write bus_space_write_raw_region_4
181 typedef u_int32_t myx_bus_t;
182 #endif
183 #define myx_bus_space_write(_sc, _o, _a, _l) \
184     _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l))
185 
186 int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
187 int	 myx_boot(struct myx_softc *, u_int32_t);
188 
189 int	 myx_rdma(struct myx_softc *, u_int);
190 int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
191 	    bus_size_t, u_int align);
192 void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
193 int	 myx_media_change(struct ifnet *);
194 void	 myx_media_status(struct ifnet *, struct ifmediareq *);
195 void	 myx_link_state(struct myx_softc *, u_int32_t);
196 void	 myx_watchdog(struct ifnet *);
197 int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
198 int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
199 void	 myx_up(struct myx_softc *);
200 void	 myx_iff(struct myx_softc *);
201 void	 myx_down(struct myx_softc *);
202 
203 void	 myx_start(struct ifnet *);
204 void	 myx_write_txd_tail(struct myx_softc *, struct myx_slot *, u_int8_t,
205 	    u_int32_t, u_int);
206 int	 myx_load_mbuf(struct myx_softc *, struct myx_slot *, struct mbuf *);
207 int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
208 int	 myx_intr(void *);
209 void	 myx_rxeof(struct myx_softc *);
210 void	 myx_txeof(struct myx_softc *, u_int32_t);
211 
212 int			myx_buf_fill(struct myx_softc *, struct myx_slot *,
213 			    struct mbuf *(*)(void));
214 struct mbuf *		myx_mcl_small(void);
215 struct mbuf *		myx_mcl_big(void);
216 
217 int			myx_rx_init(struct myx_softc *, int, bus_size_t);
218 int			myx_rx_fill(struct myx_softc *, struct myx_rx_ring *);
219 void			myx_rx_empty(struct myx_softc *, struct myx_rx_ring *);
220 void			myx_rx_free(struct myx_softc *, struct myx_rx_ring *);
221 
222 int			myx_tx_init(struct myx_softc *, bus_size_t);
223 void			myx_tx_empty(struct myx_softc *);
224 void			myx_tx_free(struct myx_softc *);
225 
226 void			myx_refill(void *);
227 
/* Autoconf driver/attach glue. */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};
234 
/* PCI IDs this driver attaches to. */
const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};
239 
240 int
241 myx_match(struct device *parent, void *match, void *aux)
242 {
243 	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
244 }
245 
/*
 * Autoconf attach: map BAR0, read the board's MAC address and part
 * number out of the NIC's string-specs area, map the interrupt (MSI
 * preferred, INTx fallback), create the shared jumbo cluster pool, and
 * defer firmware load and ifnet setup to myx_attachhook() once the
 * root filesystem (and thus /etc/firmware) is available.
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* Wire up both rx rings' refill timeouts and cluster allocators. */
	sc->sc_rx_ring[MYX_RXSMALL].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXSMALL].mrr_mclget = myx_mcl_small;
	timeout_set(&sc->sc_rx_ring[MYX_RXSMALL].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXSMALL]);
	sc->sc_rx_ring[MYX_RXBIG].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXBIG].mrr_mclget = myx_mcl_big;
	timeout_set(&sc->sc_rx_ring[MYX_RXBIG].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXBIG]);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* this is sort of racy */
	if (myx_mcl_pool == NULL) {
		extern struct kmem_pa_mode kp_dma_contig;

		/* NOTE(review): M_WAITOK malloc cannot fail, so the
		 * NULL check below is effectively dead code. */
		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
		    M_WAITOK);
		if (myx_mcl_pool == NULL) {
			printf("%s: unable to allocate mcl pool\n",
			    DEVNAME(sc));
			goto unmap;
		}
		/* 9k clusters backed by physically contiguous pages. */
		pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY, IPL_NET,
		    0, "myxmcl", NULL);
		pool_set_constraints(myx_mcl_pool, &kp_dma_contig);
	}

	if (myx_pcie_dc(sc, pa) != 0)
		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));

	config_mountroot(self, myx_attachhook);

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
321 
322 int
323 myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
324 {
325 	pcireg_t dcsr;
326 	pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
327 	pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
328 	int reg;
329 
330 	if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
331 	    &reg, NULL) == 0)
332 		return (-1);
333 
334 	reg += PCI_PCIE_DCSR;
335 	dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
336 	if ((dcsr & mask) != dc) {
337 		CLR(dcsr, mask);
338 		SET(dcsr, dc);
339 		pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
340 	}
341 
342 	return (0);
343 }
344 
/*
 * Parse up to maxlen characters of a MAC address string (hex digits in
 * either case, any separators) into lladdr.  Returns the number of
 * characters consumed so callers can advance their parse position.
 *
 * Fixes vs the previous revision: the loop bound is now checked before
 * dereferencing mac[i] (the old order read one byte past maxlen when
 * the string was not NUL-terminated inside the window), and stores are
 * capped at ETHER_ADDR_LEN bytes so extra hex digits cannot overrun
 * lladdr.  The consumed-character count is unchanged.
 */
u_int
myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
{
	u_int		i, j;
	u_int8_t	digit;

	memset(lladdr, 0, ETHER_ADDR_LEN);
	for (i = j = 0; i < maxlen && mac[i] != '\0'; i++) {
		if (mac[i] >= '0' && mac[i] <= '9')
			digit = mac[i] - '0';
		else if (mac[i] >= 'A' && mac[i] <= 'F')
			digit = mac[i] - 'A' + 10;
		else if (mac[i] >= 'a' && mac[i] <= 'f')
			digit = mac[i] - 'a' + 10;
		else
			continue;	/* skip separators like ':' */
		if (j < ETHER_ADDR_LEN * 2) {
			/* high nibble first, then low nibble */
			if ((j & 1) == 0)
				digit <<= 4;
			lladdr[j++/2] |= digit;
		}
	}

	return (i);
}
368 
/*
 * Read the firmware's string-specs area out of the register window and
 * parse it: "MAC=xx:xx:..." fills sc_ac.ac_enaddr, "PC=..." fills the
 * caller's part-number buffer.  Returns 0 on success, 1 if the header
 * pointer read from MYX_HEADER_POS falls outside the mapped window.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t	offset;
	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
	u_int		i, len, maxlen;

	/* The header location is stored big-endian at a fixed offset. */
	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	/* The area is a sequence of NUL-terminated "KEY=value" strings. */
	for (i = 0; i < len; i++) {
		maxlen = len - i;
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* Skip to the terminator of the current string; the
		 * outer loop's i++ then steps past it. */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}
410 
/*
 * Load the named firmware image from disk, validate its embedded header
 * (type and version must match what this driver expects), copy it into
 * the NIC's SRAM in 256 byte chunks, and boot it.  Returns 0 on
 * success; on any failure an error is printed and 1 is returned.
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* The image stores the offset of its own header big-endian. */
	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF, fwlen);
	return (ret);
}
465 
/*
 * Deferred attach, run at mountroot time so firmware files are
 * reachable: allocate the command buffer, load and boot the aligned
 * firmware, reset the NIC, pick aligned vs unaligned firmware based on
 * a DMA probe, establish the interrupt, and attach the ifnet.
 */
void
myx_attachhook(struct device *self)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* Aligned firmware default; myx_probe_firmware() may drop to 2048. */
	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_start = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = 9000;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* Placeholder; the real sendq length is set in myx_up(). */
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}
537 
/*
 * Decide whether the already-loaded aligned firmware can be kept.  On
 * PCIe links 4 lanes wide or less the aligned firmware is always fine.
 * Otherwise run the NIC's unaligned DMA self-tests against a scratch
 * buffer; if any test fails, fall back to the unaligned firmware and
 * shrink sc_tx_boundary to 2048.  Returns 0 when a usable firmware is
 * in place, 1 on a hard failure.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Negotiated link width lives in LCSR bits 25:20. */
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * mc_data2 encodes the test length (4096) in the low half; the
	 * high-half values 0x1000x presumably select read, write, and
	 * read/write tests respectively — matches the messages below.
	 */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	/* Aligned firmware is unusable; switch to the unaligned image. */
	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}
618 
619 void
620 myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
621 {
622 	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
623 	    BUS_SPACE_BARRIER_READ);
624 	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
625 }
626 
627 void
628 myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
629 {
630 	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
631 	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
632 	    BUS_SPACE_BARRIER_WRITE);
633 }
634 
635 int
636 myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
637     bus_size_t size, u_int align)
638 {
639 	mxm->mxm_size = size;
640 
641 	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
642 	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
643 	    &mxm->mxm_map) != 0)
644 		return (1);
645 	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
646 	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
647 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
648 		goto destroy;
649 	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
650 	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
651 		goto free;
652 	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
653 	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
654 		goto unmap;
655 
656 	return (0);
657  unmap:
658 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
659  free:
660 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
661  destroy:
662 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
663 	return (1);
664 }
665 
666 void
667 myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
668 {
669 	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
670 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
671 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
672 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
673 }
674 
/*
 * Issue a command to the NIC.  The command block (with the DMA address
 * of the shared response buffer) is written into the register window;
 * the firmware replies by DMAing a myx_response back, so we poll the
 * response's result word (pre-set to 0xffffffff) for up to ~20ms.
 * Returns 0 on success and stores the response data in *r if non-NULL;
 * returns -1 on timeout or a non-zero firmware result.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	struct myx_response	*mr;
	u_int			 i;
	u_int32_t		 result, data;
#ifdef MYX_DEBUG
	/* Command names indexed by command number, for DPRINTF only. */
	static const char *cmds[MYXCMD_MAX] = {
		"CMD_NONE",
		"CMD_RESET",
		"CMD_GET_VERSION",
		"CMD_SET_INTRQDMA",
		"CMD_SET_BIGBUFSZ",
		"CMD_SET_SMALLBUFSZ",
		"CMD_GET_TXRINGOFF",
		"CMD_GET_RXSMALLRINGOFF",
		"CMD_GET_RXBIGRINGOFF",
		"CMD_GET_INTRACKOFF",
		"CMD_GET_INTRDEASSERTOFF",
		"CMD_GET_TXRINGSZ",
		"CMD_GET_RXRINGSZ",
		"CMD_SET_INTRQSZ",
		"CMD_SET_IFUP",
		"CMD_SET_IFDOWN",
		"CMD_SET_MTU",
		"CMD_GET_INTRCOALDELAYOFF",
		"CMD_SET_STATSINTVL",
		"CMD_SET_STATSDMA_OLD",
		"CMD_SET_PROMISC",
		"CMD_UNSET_PROMISC",
		"CMD_SET_LLADDR",
		"CMD_SET_FC",
		"CMD_UNSET_FC",
		"CMD_DMA_TEST",
		"CMD_SET_ALLMULTI",
		"CMD_UNSET_ALLMULTI",
		"CMD_SET_MCASTGROUP",
		"CMD_UNSET_MCASTGROUP",
		"CMD_UNSET_MCAST",
		"CMD_SET_STATSDMA",
		"CMD_UNALIGNED_DMA_TEST",
		"CMD_GET_UNALIGNED_STATUS"
	};
#endif

	mc->mc_cmd = htobe32(cmd);
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* Sentinel value the firmware overwrites when it responds. */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Poll for the response, 1ms per iteration, 20 tries max. */
	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmds[cmd], i, result, data, data);

	if (result != 0)
		return (-1);

	if (r != NULL)
		*r = data;
	return (0);
}
758 
/*
 * Boot firmware previously written to SRAM: hand the NIC a boot command
 * describing the image (skipping its first 8 bytes, per bc_copyto) and
 * poll the shared status word, which the firmware DMAs to 0xffffffff
 * once it is running.  Returns 0 on success, 1 on timeout (~200ms).
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	/* Firmware acknowledges boot by writing 0xffffffff here. */
	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Poll 1ms per iteration, up to 200 tries. */
	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}
802 
/*
 * Enable or disable the NIC's dummy RDMA engine, pointing it at the
 * pre-allocated pad buffer.  Acknowledged the same way as myx_boot():
 * the firmware writes 0xffffffff into the shared status word.  Returns
 * 0 on success, 1 on timeout (~20ms).
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd	 rc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
	u_int32_t		*status;
	int			 ret = 1;
	u_int			 i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	/* NOTE(review): rc is not memset; all six visible fields are
	 * assigned below, but any padding/extra fields in struct
	 * myx_rdmacmd would be sent uninitialized — confirm layout. */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	/* Poll 1ms per iteration, up to 20 tries. */
	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}
853 
/* Media is fixed 10G full duplex; nothing to reconfigure. */
int
myx_media_change(struct ifnet *ifp)
{
	return (0);
}
860 
/*
 * ifmedia status callback: report link state by reading ms_linkstate
 * out of the DMA'd status block (only meaningful while running), then
 * fill in the fixed 10G full-duplex/flow-control media word.
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	/* Bracket the status-block read with dmamap syncs. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sts = sc->sc_sts->ms_linkstate;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Update if_link_state before reporting from it. */
	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}
890 
891 void
892 myx_link_state(struct myx_softc *sc, u_int32_t sts)
893 {
894 	struct ifnet		*ifp = &sc->sc_ac.ac_if;
895 	int			 link_state = LINK_STATE_DOWN;
896 
897 	if (betoh32(sts) == MYXSTS_LINKUP)
898 		link_state = LINK_STATE_FULL_DUPLEX;
899 	if (ifp->if_link_state != link_state) {
900 		ifp->if_link_state = link_state;
901 		if_link_state_change(ifp);
902 		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
903 		    IF_Gbps(10) : 0;
904 	}
905 }
906 
/* Watchdog hook: deliberately a no-op for this hardware. */
void
myx_watchdog(struct ifnet *ifp)
{
}
912 
/*
 * ifnet ioctl handler: bring the interface up/down, service media and
 * rx-ring-info requests, and fall through to ether_ioctl() for the
 * rest.  An ENETRESET from any path is translated into a multicast
 * filter reload via myx_iff() when the interface is running.
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
962 
963 int
964 myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
965 {
966 	struct if_rxring_info ifr[2];
967 
968 	memset(ifr, 0, sizeof(ifr));
969 
970 	ifr[0].ifr_size = MYX_RXSMALL_SIZE;
971 	ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr;
972 
973 	ifr[1].ifr_size = MYX_RXBIG_SIZE;
974 	ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr;
975 
976 	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
977 }
978 
979 void
980 myx_up(struct myx_softc *sc)
981 {
982 	struct ifnet		*ifp = &sc->sc_ac.ac_if;
983 	struct myx_cmd		mc;
984 	bus_dmamap_t		map;
985 	size_t			size;
986 	u_int			maxpkt;
987 	u_int32_t		r;
988 
989 	memset(&mc, 0, sizeof(mc));
990 	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
991 		printf("%s: failed to reset the device\n", DEVNAME(sc));
992 		return;
993 	}
994 
995 	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
996 	    64, MYXALIGN_CMD) != 0) {
997 		printf("%s: failed to allocate zero pad memory\n",
998 		    DEVNAME(sc));
999 		return;
1000 	}
1001 	memset(sc->sc_zerodma.mxm_kva, 0, 64);
1002 	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1003 	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1004 
1005 	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
1006 	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
1007 		printf("%s: failed to allocate pad DMA memory\n",
1008 		    DEVNAME(sc));
1009 		goto free_zero;
1010 	}
1011 	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1012 	    sc->sc_paddma.mxm_map->dm_mapsize,
1013 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1014 
1015 	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
1016 		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
1017 		goto free_pad;
1018 	}
1019 
1020 	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
1021 		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
1022 		goto free_pad;
1023 	}
1024 	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
1025 
1026 	memset(&mc, 0, sizeof(mc));
1027 	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
1028 		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
1029 		goto free_pad;
1030 	}
1031 	sc->sc_tx_ring_prod = 0;
1032 	sc->sc_tx_ring_cons = 0;
1033 	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1034 	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1035 	sc->sc_tx_count = 0;
1036 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1037 
1038 	/* Allocate Interrupt Queue */
1039 
1040 	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1041 	sc->sc_intrq_idx = 0;
1042 
1043 	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1044 	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1045 	    size, MYXALIGN_DATA) != 0) {
1046 		goto free_pad;
1047 	}
1048 	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1049 	map = sc->sc_intrq_dma.mxm_map;
1050 	memset(sc->sc_intrq, 0, size);
1051 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1052 	    BUS_DMASYNC_PREREAD);
1053 
1054 	memset(&mc, 0, sizeof(mc));
1055 	mc.mc_data0 = htobe32(size);
1056 	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1057 		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1058 		goto free_intrq;
1059 	}
1060 
1061 	memset(&mc, 0, sizeof(mc));
1062 	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1063 	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1064 	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1065 		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1066 		goto free_intrq;
1067 	}
1068 
1069 	/*
1070 	 * get interrupt offsets
1071 	 */
1072 
1073 	memset(&mc, 0, sizeof(mc));
1074 	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1075 	    &sc->sc_irqclaimoff) != 0) {
1076 		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1077 		goto free_intrq;
1078 	}
1079 
1080 	memset(&mc, 0, sizeof(mc));
1081 	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1082 	    &sc->sc_irqdeassertoff) != 0) {
1083 		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1084 		goto free_intrq;
1085 	}
1086 
1087 	memset(&mc, 0, sizeof(mc));
1088 	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1089 	    &sc->sc_irqcoaloff) != 0) {
1090 		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1091 		goto free_intrq;
1092 	}
1093 
1094 	/* Set an appropriate interrupt coalescing period */
1095 	r = htobe32(MYX_IRQCOALDELAY);
1096 	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1097 
1098 	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1099 		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1100 		goto free_intrq;
1101 	}
1102 
1103 	memset(&mc, 0, sizeof(mc));
1104 	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1105 		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1106 		goto free_intrq;
1107 	}
1108 
1109 	memset(&mc, 0, sizeof(mc));
1110 	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1111 		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1112 		goto free_intrq;
1113 	}
1114 
1115 	memset(&mc, 0, sizeof(mc));
1116 	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1117 	    &sc->sc_tx_ring_offset) != 0) {
1118 		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1119 		goto free_intrq;
1120 	}
1121 
1122 	memset(&mc, 0, sizeof(mc));
1123 	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1124 	    &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) {
1125 		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1126 		goto free_intrq;
1127 	}
1128 
1129 	memset(&mc, 0, sizeof(mc));
1130 	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1131 	    &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) {
1132 		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1133 		goto free_intrq;
1134 	}
1135 
1136 	/* Allocate Interrupt Data */
1137 	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1138 	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1139 		printf("%s: failed to allocate status DMA memory\n",
1140 		    DEVNAME(sc));
1141 		goto free_intrq;
1142 	}
1143 	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1144 	map = sc->sc_sts_dma.mxm_map;
1145 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1146 	    BUS_DMASYNC_PREREAD);
1147 
1148 	memset(&mc, 0, sizeof(mc));
1149 	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1150 	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1151 	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1152 	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1153 		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1154 		goto free_sts;
1155 	}
1156 
1157 	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1158 
1159 	memset(&mc, 0, sizeof(mc));
1160 	mc.mc_data0 = htobe32(maxpkt);
1161 	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1162 		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1163 		goto free_sts;
1164 	}
1165 
1166 	if (myx_tx_init(sc, maxpkt) != 0)
1167 		goto free_sts;
1168 
1169 	if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0)
1170 		goto free_tx_ring;
1171 
1172 	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXSMALL]) != 0)
1173 		goto free_rx_ring_small;
1174 
1175 	if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0)
1176 		goto empty_rx_ring_small;
1177 
1178 	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXBIG]) != 0)
1179 		goto free_rx_ring_big;
1180 
1181 	memset(&mc, 0, sizeof(mc));
1182 	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
1183 	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1184 		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1185 		goto empty_rx_ring_big;
1186 	}
1187 
1188 	memset(&mc, 0, sizeof(mc));
1189 	mc.mc_data0 = htobe32(16384);
1190 	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1191 		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1192 		goto empty_rx_ring_big;
1193 	}
1194 
1195 	sc->sc_state = MYX_S_RUNNING;
1196 
1197 	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1198 		printf("%s: failed to start the device\n", DEVNAME(sc));
1199 		goto empty_rx_ring_big;
1200 	}
1201 
1202 	ifq_clr_oactive(&ifp->if_snd);
1203 	SET(ifp->if_flags, IFF_RUNNING);
1204 	myx_iff(sc);
1205 	if_start(ifp);
1206 
1207 	return;
1208 
1209 empty_rx_ring_big:
1210 	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1211 free_rx_ring_big:
1212 	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1213 empty_rx_ring_small:
1214 	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1215 free_rx_ring_small:
1216 	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1217 free_tx_ring:
1218 	myx_tx_free(sc);
1219 free_sts:
1220 	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1221 	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1222 	myx_dmamem_free(sc, &sc->sc_sts_dma);
1223 free_intrq:
1224 	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1225 	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1226 	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1227 free_pad:
1228 	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1229 	    sc->sc_paddma.mxm_map->dm_mapsize,
1230 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1231 	myx_dmamem_free(sc, &sc->sc_paddma);
1232 
1233 	memset(&mc, 0, sizeof(mc));
1234 	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1235 		printf("%s: failed to reset the device\n", DEVNAME(sc));
1236 	}
1237 free_zero:
1238 	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1239 	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1240 	myx_dmamem_free(sc, &sc->sc_zerodma);
1241 }
1242 
1243 int
1244 myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1245 {
1246 	struct myx_cmd		 mc;
1247 
1248 	memset(&mc, 0, sizeof(mc));
1249 	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1250 	    addr[2] << 8 | addr[3]);
1251 	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1252 
1253 	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1254 		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1255 		return (-1);
1256 	}
1257 	return (0);
1258 }
1259 
1260 void
1261 myx_iff(struct myx_softc *sc)
1262 {
1263 	struct myx_cmd		mc;
1264 	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1265 	struct ether_multi	*enm;
1266 	struct ether_multistep	step;
1267 	u_int8_t *addr;
1268 
1269 	CLR(ifp->if_flags, IFF_ALLMULTI);
1270 
1271 	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1272 	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1273 		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1274 		return;
1275 	}
1276 
1277 	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1278 		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1279 		return;
1280 	}
1281 
1282 	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1283 		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1284 		return;
1285 	}
1286 
1287 	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1288 	    sc->sc_ac.ac_multirangecnt > 0) {
1289 		SET(ifp->if_flags, IFF_ALLMULTI);
1290 		return;
1291 	}
1292 
1293 	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1294 	while (enm != NULL) {
1295 		addr = enm->enm_addrlo;
1296 
1297 		memset(&mc, 0, sizeof(mc));
1298 		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1299 		    addr[2] << 8 | addr[3]);
1300 		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1301 		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1302 			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1303 			return;
1304 		}
1305 
1306 		ETHER_NEXT_MULTI(step, enm);
1307 	}
1308 
1309 	memset(&mc, 0, sizeof(mc));
1310 	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1311 		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1312 		return;
1313 	}
1314 }
1315 
/*
 * Bring the interface down: tell the firmware to stop, wait for the
 * interrupt handler to observe the final status update (which moves
 * sc_state to MYX_S_OFF), then reset the chip and release all ring
 * and dma resources allocated by myx_up().
 */
void
myx_down(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	struct sleep_state	 sls;
	struct myx_cmd		 mc;
	int			 s;
	int			 ring;

	CLR(ifp->if_flags, IFF_RUNNING);

	/*
	 * Snapshot the firmware's link-down counter so myx_intr() can
	 * tell when the status update triggered by IFDOWN has arrived
	 * (it compares sts->ms_linkdown against sc_linkdown).
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sc->sc_linkdown = sts->ms_linkdown;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Publish the DOWN state before issuing the command. */
	sc->sc_state = MYX_S_DOWN;
	membar_producer();

	memset(&mc, 0, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	/* Sleep until myx_intr() flips the state to MYX_S_OFF. */
	while (sc->sc_state != MYX_S_OFF) {
		sleep_setup(&sls, sts, PWAIT, "myxdown");
		membar_consumer();
		sleep_finish(&sls, sc->sc_state != MYX_S_OFF);
	}

	/* Report the link as gone. */
	s = splnet();
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	/* Make sure no transmit work is left running before teardown. */
	ifq_clr_oactive(&ifp->if_snd);
	ifq_barrier(&ifp->if_snd);

	/* Release both receive rings (small and big). */
	for (ring = 0; ring < 2; ring++) {
		struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];

		timeout_del(&mrr->mrr_refill);
		myx_rx_empty(sc, mrr);
		myx_rx_free(sc, mrr);
	}

	myx_tx_empty(sc);
	myx_tx_free(sc);

	/* the sleep shizz above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1389 
/*
 * Write the second and subsequent tx descriptors for a packet into
 * the NIC's tx ring (the first descriptor is written separately by
 * the caller, after these, so the firmware never sees a partially
 * populated chain).  Frames shorter than the 60-byte ethernet minimum
 * get an extra descriptor pointing at the zero-filled dma buffer to
 * pad them out.
 */
void
myx_write_txd_tail(struct myx_softc *sc, struct myx_slot *ms, u_int8_t flags,
    u_int32_t offset, u_int idx)
{
	struct myx_tx_desc		txd;
	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
	bus_dmamap_t			map = ms->ms_map;
	int				i;

	/* descriptors 1..n-1; descriptor 0 is the caller's job */
	for (i = 1; i < map->dm_nsegs; i++) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
		txd.tx_flags = flags;

		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}

	/* pad runt frames */
	if (map->dm_mapsize < 60) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(60 - map->dm_mapsize);
		txd.tx_flags = flags;

		/* i == map->dm_nsegs here: the pad goes after the data */
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}
}
1422 
/*
 * Transmit start routine.  Dequeues packets from the send queue, loads
 * them into tx slots, and posts descriptors to the NIC's tx ring.  The
 * first descriptor of the whole batch is written last (and its final
 * word written separately after a barrier) so the firmware does not
 * begin processing until every descriptor behind it is in place.
 */
void
myx_start(struct ifnet *ifp)
{
	struct myx_tx_desc		txd;
	struct myx_softc		*sc = ifp->if_softc;
	struct myx_slot			*ms;
	bus_dmamap_t			map;
	struct mbuf			*m;
	u_int32_t			offset = sc->sc_tx_ring_offset;
	u_int				idx, cons, prod;
	u_int				free, used;
	u_int8_t			flags;

	idx = sc->sc_tx_ring_prod;

	/* figure out space */
	free = sc->sc_tx_ring_cons;
	if (free <= idx)
		free += sc->sc_tx_ring_count;
	free -= idx;

	/* cons/prod index the slot array; idx indexes ring descriptors */
	cons = prod = sc->sc_tx_prod;

	used = 0;

	/* phase 1: dequeue and dma-load as many packets as fit */
	for (;;) {
		/* worst case per packet: sc_tx_nsegs segments + 1 pad */
		if (used + sc->sc_tx_nsegs + 1 > free) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		ms = &sc->sc_tx_slots[prod];

		if (myx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* runts consume one extra descriptor for padding */
		used += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		if (++prod >= sc->sc_tx_ring_count)
			prod = 0;
	}

	/* nothing was queued */
	if (cons == prod)
		return;

	ms = &sc->sc_tx_slots[cons];

	/*
	 * phase 2: write descriptors for every packet except the first.
	 * The loop advances idx past the first packet's descriptors
	 * before writing anything, so the first slot stays untouched.
	 */
	for (;;) {
		idx += ms->ms_map->dm_nsegs +
		    (ms->ms_map->dm_mapsize < 60 ? 1 : 0);
		if (idx >= sc->sc_tx_ring_count)
			idx -= sc->sc_tx_ring_count;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;

		if (cons == prod)
			break;

		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, ms, flags, offset, idx);
	}

	/* go back and post first packet */
	ms = &sc->sc_tx_slots[sc->sc_tx_prod];
	map = ms->ms_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	memset(&txd, 0, sizeof(txd));
	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, ms, flags, offset, sc->sc_tx_ring_prod);

	/* write all but the last word of the first descriptor ... */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* ... then the final word, which hands the chain to the chip */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * (sc->sc_tx_ring_prod + 1) -
	    sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);

	/* commit */
	sc->sc_tx_ring_prod = idx;
	sc->sc_tx_prod = prod;
}
1554 
/*
 * Load an mbuf chain into a tx slot's dma map.  If the chain has too
 * many segments for the map, defragment it and retry once.  Returns 0
 * on success (with ms->ms_m recording ownership of the mbuf) or 1 on
 * failure, in which case the caller frees the mbuf.
 */
int
myx_load_mbuf(struct myx_softc *sc, struct myx_slot *ms, struct mbuf *m)
{
	bus_dma_tag_t			dmat = sc->sc_dmat;
	bus_dmamap_t			dmap = ms->ms_map;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(dmat, dmap, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return (1);
	}

	ms->ms_m = m;
	return (0);
}
1578 
/*
 * Interrupt handler.  The firmware dmas a struct myx_status into host
 * memory and sets ms_isvalid to signal new work.  Completed transmits,
 * received packets, and link/state changes are all reported through
 * this status block.
 */
int
myx_intr(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state		 state;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 data;
	u_int8_t		 valid = 0;

	state = sc->sc_state;
	if (state == MYX_S_OFF)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/* not our interrupt if the status block hasn't been updated */
	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		return (0);
	}

	/* legacy INTx needs an explicit deassert write */
	if (sc->sc_intx) {
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/*
	 * The firmware may still be dmaing updates into the status
	 * block; re-read the tx completion count until ms_isvalid
	 * stays clear across a sync.
	 */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	/* claim the interrupt by writing both claim words */
	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	if (sts->ms_statusupdated) {
		/*
		 * If myx_down() is waiting and the link-down counter
		 * has moved since it was snapshotted, the firmware has
		 * finished going down: mark the chip OFF and wake the
		 * sleeper.  Otherwise report the new link state.
		 */
		if (state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown) {
			sc->sc_state = MYX_S_OFF;
			membar_producer();
			wakeup(sts);
		} else {
			data = sts->ms_linkstate;
			if (data != 0xffffffff) {
				KERNEL_LOCK();
				myx_link_state(sc, data);
				KERNEL_UNLOCK();
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (1);
}
1653 
1654 void
1655 myx_refill(void *xmrr)
1656 {
1657 	struct myx_rx_ring *mrr = xmrr;
1658 	struct myx_softc *sc = mrr->mrr_softc;
1659 
1660 	myx_rx_fill(sc, mrr);
1661 
1662 	if (mrr->mrr_prod == mrr->mrr_cons)
1663 		timeout_add(&mrr->mrr_refill, 1);
1664 }
1665 
/*
 * Reclaim completed transmits.  done_count is the firmware's running
 * transmit completion counter; slots are freed until the driver's own
 * counter (sc_tx_count) catches up with it.
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_slot *ms;
	bus_dmamap_t map;
	u_int idx, cons;

	/* idx walks ring descriptors, cons walks the slot array */
	idx = sc->sc_tx_ring_cons;
	cons = sc->sc_tx_cons;

	do {
		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		/* each packet used dm_nsegs descriptors, +1 pad if runt */
		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(ms->ms_m);

		ifp->if_opackets++;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;
	} while (++sc->sc_tx_count != done_count);

	if (idx >= sc->sc_tx_ring_count)
		idx -= sc->sc_tx_ring_count;

	sc->sc_tx_ring_cons = idx;
	sc->sc_tx_cons = cons;

	/* descriptors were freed; kick the send queue if it stalled */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
1703 
/*
 * Receive completion.  The firmware posts 16-bit packet lengths into
 * the interrupt queue; a length of zero terminates the batch.  Each
 * entry's size determines whether it came from the small or the big
 * rx ring.  Drained slots are refilled at the end.
 */
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_rx_ring *mrr;
	struct myx_slot *ms;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		/* re-zero the entry so it reads as "empty" next time */
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* short packets came from the small ring, long from big */
		ring = (len <= (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mrr = &sc->sc_rx_ring[ring];
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

		/* skip the alignment pad the chip wrote in front of the frame */
		m = ms->ms_m;
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* return the consumed slots and refill each touched ring */
	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mrr = &sc->sc_rx_ring[ring];

		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
		myx_rx_fill(sc, mrr);
		/* ring completely empty: let the timeout retry the fill */
		if (mrr->mrr_prod == mrr->mrr_cons)
			timeout_add(&mrr->mrr_refill, 0);
	}

	if_input(ifp, &ml);
}
1765 
/*
 * Post up to "slots" fresh rx buffers to the NIC, starting at the
 * ring's producer index.  The first slot's buffer is allocated up
 * front but its descriptor is written last, after a barrier, so the
 * firmware only sees the run of new descriptors once they are all in
 * place.  Returns the number of slots that could not be filled.
 */
static int
myx_rx_fill_slots(struct myx_softc *sc, struct myx_rx_ring *mrr, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	u_int p, first, fills;

	/* fill the first slot, but hold back its descriptor write */
	first = p = mrr->mrr_prod;
	if (myx_buf_fill(sc, &mrr->mrr_slots[first], mrr->mrr_mclget) != 0)
		return (slots);

	if (++p >= sc->sc_rx_ring_count)
		p = 0;

	for (fills = 1; fills < slots; fills++) {
		ms = &mrr->mrr_slots[p];

		if (myx_buf_fill(sc, ms, mrr->mrr_mclget) != 0)
			break;

		rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc, offset + p * sizeof(rxd),
		    &rxd, sizeof(rxd));

		if (++p >= sc->sc_rx_ring_count)
			p = 0;
	}

	mrr->mrr_prod = p;

	/* make sure the first descriptor is seen after the others */
	if (fills > 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	ms = &mrr->mrr_slots[first];
	rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
	myx_bus_space_write(sc, offset + first * sizeof(rxd),
	    &rxd, sizeof(rxd));

	return (slots - fills);
}
1811 
1812 int
1813 myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
1814 {
1815 	struct myx_rx_desc rxd;
1816 	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
1817 	struct myx_slot *ms;
1818 	u_int32_t offset = mrr->mrr_offset;
1819 	int rv;
1820 	int i;
1821 
1822 	mrr->mrr_slots = mallocarray(sizeof(*ms), sc->sc_rx_ring_count,
1823 	    M_DEVBUF, M_WAITOK);
1824 	if (mrr->mrr_slots == NULL)
1825 		return (ENOMEM);
1826 
1827 	memset(&rxd, 0xff, sizeof(rxd));
1828 	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1829 		ms = &mrr->mrr_slots[i];
1830 		rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1831 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
1832 		if (rv != 0)
1833 			goto destroy;
1834 
1835 		myx_bus_space_write(sc, offset + i * sizeof(rxd),
1836 		    &rxd, sizeof(rxd));
1837 	}
1838 
1839 	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1840 	mrr->mrr_prod = mrr->mrr_cons = 0;
1841 
1842 	return (0);
1843 
1844 destroy:
1845 	while (i-- > 0) {
1846 		ms = &mrr->mrr_slots[i];
1847 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1848 	}
1849 	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1850 	return (rv);
1851 }
1852 
1853 int
1854 myx_rx_fill(struct myx_softc *sc, struct myx_rx_ring *mrr)
1855 {
1856 	u_int slots;
1857 
1858 	slots = if_rxr_get(&mrr->mrr_rxr, sc->sc_rx_ring_count);
1859 	if (slots == 0)
1860 		return (1);
1861 
1862 	slots = myx_rx_fill_slots(sc, mrr, slots);
1863 	if (slots > 0)
1864 		if_rxr_put(&mrr->mrr_rxr, slots);
1865 
1866 	return (0);
1867 }
1868 
1869 void
1870 myx_rx_empty(struct myx_softc *sc, struct myx_rx_ring *mrr)
1871 {
1872 	struct myx_slot *ms;
1873 
1874 	while (mrr->mrr_cons != mrr->mrr_prod) {
1875 		ms = &mrr->mrr_slots[mrr->mrr_cons];
1876 
1877 		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
1878 			mrr->mrr_cons = 0;
1879 
1880 		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1881 		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1882 		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
1883 		m_freem(ms->ms_m);
1884 	}
1885 
1886 	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1887 }
1888 
1889 void
1890 myx_rx_free(struct myx_softc *sc, struct myx_rx_ring *mrr)
1891 {
1892 	struct myx_slot *ms;
1893 	int i;
1894 
1895 	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1896 		ms = &mrr->mrr_slots[i];
1897 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1898 	}
1899 
1900 	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1901 }
1902 
1903 struct mbuf *
1904 myx_mcl_small(void)
1905 {
1906 	struct mbuf *m;
1907 
1908 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MYX_RXSMALL_SIZE);
1909 	if (m == NULL)
1910 		return (NULL);
1911 
1912 	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;
1913 
1914 	return (m);
1915 }
1916 
1917 struct mbuf *
1918 myx_mcl_big(void)
1919 {
1920 	struct mbuf *m;
1921 	void *mcl;
1922 
1923 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1924 	if (m == NULL)
1925 		return (NULL);
1926 
1927 	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
1928 	if (mcl == NULL) {
1929 		m_free(m);
1930 		return (NULL);
1931 	}
1932 
1933 	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, MEXTFREE_POOL, myx_mcl_pool);
1934 	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;
1935 
1936 	return (m);
1937 }
1938 
1939 int
1940 myx_buf_fill(struct myx_softc *sc, struct myx_slot *ms,
1941     struct mbuf *(*mclget)(void))
1942 {
1943 	struct mbuf *m;
1944 	int rv;
1945 
1946 	m = (*mclget)();
1947 	if (m == NULL)
1948 		return (ENOMEM);
1949 
1950 	rv = bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m, BUS_DMA_NOWAIT);
1951 	if (rv != 0) {
1952 		m_freem(m);
1953 		return (rv);
1954 	}
1955 
1956 	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1957 	    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1958 
1959 	ms->ms_m = m;
1960 
1961 	return (0);
1962 }
1963 
1964 int
1965 myx_tx_init(struct myx_softc *sc, bus_size_t size)
1966 {
1967 	struct myx_slot *ms;
1968 	int rv;
1969 	int i;
1970 
1971 	sc->sc_tx_slots = mallocarray(sizeof(*ms), sc->sc_tx_ring_count,
1972 	    M_DEVBUF, M_WAITOK);
1973 	if (sc->sc_tx_slots == NULL)
1974 		return (ENOMEM);
1975 
1976 	for (i = 0; i < sc->sc_tx_ring_count; i++) {
1977 		ms = &sc->sc_tx_slots[i];
1978 		rv = bus_dmamap_create(sc->sc_dmat, size, sc->sc_tx_nsegs,
1979 		    sc->sc_tx_boundary, sc->sc_tx_boundary,
1980 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
1981 		if (rv != 0)
1982 			goto destroy;
1983 	}
1984 
1985 	sc->sc_tx_prod = sc->sc_tx_cons = 0;
1986 
1987 	return (0);
1988 
1989 destroy:
1990 	while (i-- > 0) {
1991 		ms = &sc->sc_tx_slots[i];
1992 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1993 	}
1994 	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
1995 	return (rv);
1996 }
1997 
1998 void
1999 myx_tx_empty(struct myx_softc *sc)
2000 {
2001 	struct myx_slot *ms;
2002 	u_int cons = sc->sc_tx_cons;
2003 	u_int prod = sc->sc_tx_prod;
2004 
2005 	while (cons != prod) {
2006 		ms = &sc->sc_tx_slots[cons];
2007 
2008 		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
2009 		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2010 		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
2011 		m_freem(ms->ms_m);
2012 
2013 		if (++cons >= sc->sc_tx_ring_count)
2014 			cons = 0;
2015 	}
2016 
2017 	sc->sc_tx_cons = cons;
2018 }
2019 
2020 void
2021 myx_tx_free(struct myx_softc *sc)
2022 {
2023 	struct myx_slot *ms;
2024 	int i;
2025 
2026 	for (i = 0; i < sc->sc_tx_ring_count; i++) {
2027 		ms = &sc->sc_tx_slots[i];
2028 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
2029 	}
2030 
2031 	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
2032 }
2033