xref: /openbsd-src/sys/dev/pci/if_myx.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /*	$OpenBSD: if_myx.c,v 1.108 2019/07/03 10:34:59 dlg Exp $	*/
2 
3 /*
4  * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21  */
22 
23 #include "bpfilter.h"
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/sockio.h>
28 #include <sys/mbuf.h>
29 #include <sys/kernel.h>
30 #include <sys/socket.h>
31 #include <sys/malloc.h>
32 #include <sys/pool.h>
33 #include <sys/timeout.h>
34 #include <sys/device.h>
35 #include <sys/proc.h>
36 #include <sys/queue.h>
37 #include <sys/rwlock.h>
38 
39 #include <machine/bus.h>
40 #include <machine/intr.h>
41 
42 #include <net/if.h>
43 #include <net/if_dl.h>
44 #include <net/if_media.h>
45 
46 #if NBPFILTER > 0
47 #include <net/bpf.h>
48 #endif
49 
50 #include <netinet/in.h>
51 #include <netinet/if_ether.h>
52 
53 #include <dev/pci/pcireg.h>
54 #include <dev/pci/pcivar.h>
55 #include <dev/pci/pcidevs.h>
56 
57 #include <dev/pci/if_myxreg.h>
58 
59 #ifdef MYX_DEBUG
60 #define MYXDBG_INIT	(1<<0)	/* chipset initialization */
61 #define MYXDBG_CMD	(2<<0)	/* commands */
62 #define MYXDBG_INTR	(3<<0)	/* interrupts */
63 #define MYXDBG_ALL	0xffff	/* enable all debugging messages */
64 int myx_debug = MYXDBG_ALL;
65 #define DPRINTF(_lvl, _arg...)	do {					\
66 	if (myx_debug & (_lvl))						\
67 		printf(_arg);						\
68 } while (0)
69 #else
70 #define DPRINTF(_lvl, arg...)
71 #endif
72 
73 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
74 
/*
 * A single contiguous DMA allocation: the loaded map, its backing
 * segment and the kernel mapping of it.  Created by myx_dmamem_alloc()
 * and torn down by myx_dmamem_free().
 */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;	/* loaded DMA map */
	bus_dma_segment_t	 mxm_seg;	/* backing segment */
	int			 mxm_nsegs;	/* segment count (always 1 here) */
	size_t			 mxm_size;	/* size of the allocation */
	caddr_t			 mxm_kva;	/* kernel virtual address */
};
82 
/* Big-cluster mbuf pool shared by all myx(4) instances; set up lazily in myx_attachhook(). */
struct pool *myx_mcl_pool;
84 
/* One ring slot: the DMA map and the mbuf currently loaded into it. */
struct myx_slot {
	bus_dmamap_t		 ms_map;
	struct mbuf		*ms_m;
};
89 
/*
 * Per-ring receive state.  The hardware has two receive rings (small
 * and big clusters); mrr_mclget points at the cluster allocator that
 * matches this ring's buffer size.
 */
struct myx_rx_ring {
	struct myx_softc	*mrr_softc;
	struct timeout		 mrr_refill;	/* deferred refill via myx_refill() */
	struct if_rxring	 mrr_rxr;
	struct myx_slot		*mrr_slots;
	u_int32_t		 mrr_offset;	/* firmware ring offset */
	u_int			 mrr_running;
	u_int			 mrr_prod;	/* producer index */
	u_int			 mrr_cons;	/* consumer index */
	struct mbuf		*(*mrr_mclget)(void);	/* cluster allocator */
};
101 
/* Interface run state; stored volatile in the softc (read across interrupt paths). */
enum myx_state {
	MYX_S_OFF = 0,
	MYX_S_RUNNING,
	MYX_S_DOWN
};
107 
struct myx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;

	/* PCI glue */
	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	/* bus resources: BAR0 register window and the DMA tag */
	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	/* shared DMA buffers: zero pad, command/response, dummy RDMA pad */
	struct myx_dmamem	 sc_zerodma;
	struct myx_dmamem	 sc_cmddma;
	struct myx_dmamem	 sc_paddma;

	/* firmware status block (link state, tx done count, ...) */
	struct myx_dmamem	 sc_sts_dma;
	volatile struct myx_status	*sc_sts;

	/* interrupt state; the offsets are queried from the firmware */
	int			 sc_intx;	/* 1 if using INTx, 0 if MSI */
	void			*sc_irqh;
	u_int32_t		 sc_irqcoaloff;
	u_int32_t		 sc_irqclaimoff;
	u_int32_t		 sc_irqdeassertoff;

	/* interrupt (completion) queue written by the firmware */
	struct myx_dmamem	 sc_intrq_dma;
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	/* receive rings: [MYX_RXSMALL] and [MYX_RXBIG] */
	u_int			 sc_rx_ring_count;
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct myx_rx_ring	 sc_rx_ring[2];

	/* transmit ring state */
	bus_size_t		 sc_tx_boundary;	/* 4096, or 2048 for unaligned fw */
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_ring_prod;
	u_int			 sc_tx_ring_cons;

	u_int			 sc_tx_prod;
	u_int			 sc_tx_cons;
	struct myx_slot		*sc_tx_slots;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;
	volatile u_int8_t	 sc_linkdown;

	/* serializes SIOCGIFSFFPAGE i2c transactions */
	struct rwlock		 sc_sff_lock;
};
163 
164 #define MYX_RXSMALL_SIZE	MCLBYTES
165 #define MYX_RXBIG_SIZE		(MYX_MTU - \
166     (ETHER_ALIGN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
167 
168 int	 myx_match(struct device *, void *, void *);
169 void	 myx_attach(struct device *, struct device *, void *);
170 int	 myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
171 int	 myx_query(struct myx_softc *sc, char *, size_t);
172 u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
173 void	 myx_attachhook(struct device *);
174 int	 myx_loadfirmware(struct myx_softc *, const char *);
175 int	 myx_probe_firmware(struct myx_softc *);
176 
177 void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
178 void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
179 
180 #if defined(__LP64__)
181 #define _myx_bus_space_write bus_space_write_raw_region_8
182 typedef u_int64_t myx_bus_t;
183 #else
184 #define _myx_bus_space_write bus_space_write_raw_region_4
185 typedef u_int32_t myx_bus_t;
186 #endif
187 #define myx_bus_space_write(_sc, _o, _a, _l) \
188     _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l))
189 
190 int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
191 int	 myx_boot(struct myx_softc *, u_int32_t);
192 
193 int	 myx_rdma(struct myx_softc *, u_int);
194 int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
195 	    bus_size_t, u_int align);
196 void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
197 int	 myx_media_change(struct ifnet *);
198 void	 myx_media_status(struct ifnet *, struct ifmediareq *);
199 void	 myx_link_state(struct myx_softc *, u_int32_t);
200 void	 myx_watchdog(struct ifnet *);
201 int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
202 int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
203 void	 myx_up(struct myx_softc *);
204 void	 myx_iff(struct myx_softc *);
205 void	 myx_down(struct myx_softc *);
206 int	 myx_get_sffpage(struct myx_softc *, struct if_sffpage *);
207 
208 void	 myx_start(struct ifqueue *);
209 void	 myx_write_txd_tail(struct myx_softc *, struct myx_slot *, u_int8_t,
210 	    u_int32_t, u_int);
211 int	 myx_load_mbuf(struct myx_softc *, struct myx_slot *, struct mbuf *);
212 int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
213 int	 myx_intr(void *);
214 void	 myx_rxeof(struct myx_softc *);
215 void	 myx_txeof(struct myx_softc *, u_int32_t);
216 
217 int			myx_buf_fill(struct myx_softc *, struct myx_slot *,
218 			    struct mbuf *(*)(void));
219 struct mbuf *		myx_mcl_small(void);
220 struct mbuf *		myx_mcl_big(void);
221 
222 int			myx_rx_init(struct myx_softc *, int, bus_size_t);
223 int			myx_rx_fill(struct myx_softc *, struct myx_rx_ring *);
224 void			myx_rx_empty(struct myx_softc *, struct myx_rx_ring *);
225 void			myx_rx_free(struct myx_softc *, struct myx_rx_ring *);
226 
227 int			myx_tx_init(struct myx_softc *, bus_size_t);
228 void			myx_tx_empty(struct myx_softc *);
229 void			myx_tx_free(struct myx_softc *);
230 
231 void			myx_refill(void *);
232 
/* autoconf(9) glue. */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

/* PCI devices this driver attaches to. */
const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};
244 
245 int
246 myx_match(struct device *parent, void *match, void *aux)
247 {
248 	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
249 }
250 
/*
 * Attach: record the PCI handles, set up the two rx ring refill
 * timeouts, map BAR0, read the board's MAC address and part number,
 * and map the interrupt (preferring MSI over INTx).  Firmware load
 * and ifnet setup are deferred to myx_attachhook() so loadfirmware(9)
 * can read from the mounted root filesystem.
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* Each rx ring refills itself via its own timeout. */
	sc->sc_rx_ring[MYX_RXSMALL].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXSMALL].mrr_mclget = myx_mcl_small;
	timeout_set(&sc->sc_rx_ring[MYX_RXSMALL].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXSMALL]);
	sc->sc_rx_ring[MYX_RXBIG].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXBIG].mrr_mclget = myx_mcl_big;
	timeout_set(&sc->sc_rx_ring[MYX_RXBIG].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXBIG]);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt; fall back to INTx if MSI is unavailable. */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* Best effort: the device still works without the PCIe tweaks. */
	if (myx_pcie_dc(sc, pa) != 0)
		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));

	config_mountroot(self, myx_attachhook);

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
310 
/*
 * Program the PCIe Device Control register: encode a Max Payload Size
 * of 4096 bytes and set Enable Relaxed Ordering, but only touch the
 * register if those fields are not already set that way.
 *
 * Returns 0 on success, -1 if the device has no PCIe capability.
 */
int
myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t dcsr;
	pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
	/* fls(4096) - 8 == 5, the DCSR encoding for a 4096 byte MPS */
	pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
	int reg;

	if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &reg, NULL) == 0)
		return (-1);

	reg += PCI_PCIE_DCSR;
	dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
	if ((dcsr & mask) != dc) {
		/* Replace only the MPS and ERO fields, keep the rest. */
		CLR(dcsr, mask);
		SET(dcsr, dc);
		pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
	}

	return (0);
}
333 
/*
 * Parse a MAC address from a string of hex digits (separators such as
 * ':' are skipped).  Writes up to ETHER_ADDR_LEN bytes into lladdr,
 * which is zeroed first.  Scans at most maxlen characters or until a
 * NUL is found, and returns the number of characters consumed so the
 * caller can continue parsing after the address.
 *
 * Fixes vs. the previous revision:
 *  - bounds-check i before dereferencing mac[i]: the caller
 *    (myx_query) passes a region that is not guaranteed to be
 *    NUL-terminated within maxlen, so the old order could read one
 *    byte past the buffer.
 *  - stop storing after ETHER_ADDR_LEN * 2 hex digits: an overlong
 *    string previously overran the 6-byte lladdr buffer.
 */
u_int
myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
{
	u_int		i, j;
	u_int8_t	digit;

	memset(lladdr, 0, ETHER_ADDR_LEN);
	for (i = j = 0; i < maxlen && mac[i] != '\0'; i++) {
		if (mac[i] >= '0' && mac[i] <= '9')
			digit = mac[i] - '0';
		else if (mac[i] >= 'A' && mac[i] <= 'F')
			digit = mac[i] - 'A' + 10;
		else if (mac[i] >= 'a' && mac[i] <= 'f')
			digit = mac[i] - 'a' + 10;
		else
			continue;
		/* ignore digits beyond a full address instead of overflowing */
		if (j < ETHER_ADDR_LEN * 2) {
			if ((j & 1) == 0)
				digit <<= 4;	/* high nibble of this byte */
			lladdr[j++ / 2] |= digit;
		}
	}

	return (i);
}
357 
/*
 * Read the firmware's specification strings from the board's memory
 * window and parse out the MAC address ("MAC=...") and part number
 * ("PC=...").  The strings area is a sequence of NUL-terminated
 * "KEY=value" tokens.
 *
 * Returns 1 if the header offset points outside the mapped window,
 * 0 otherwise.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t	offset;
	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
	u_int		i, len, maxlen;

	/* MYX_HEADER_POS holds the (big-endian) offset of the header. */
	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	for (i = 0; i < len; i++) {
		maxlen = len - i;
		/* an empty token terminates the list */
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* skip to the NUL ending the current token */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}
399 
/*
 * Load a firmware image from disk, validate its header (size, type and
 * version), copy it into the card's SRAM in 256 byte chunks and boot
 * it via myx_boot().
 *
 * Returns 0 on success, 1 on any failure (an error is printed).
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* The image stores the (big-endian) offset of its header. */
	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	/* Only accept ethernet firmware of the expected version. */
	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF, fwlen);
	return (ret);
}
454 
/*
 * Deferred attach, run once the root filesystem is mounted so
 * loadfirmware(9) can find the firmware images.  Sets up the shared
 * mbuf cluster pool, command DMA memory, loads and resets the
 * firmware, selects aligned vs unaligned firmware, establishes the
 * interrupt and finally attaches the ifnet.
 */
void
myx_attachhook(struct device *self)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* this is sort of racy */
	if (myx_mcl_pool == NULL) {
		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
		    M_WAITOK);

		m_pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY,
		    "myxmcl");
		pool_cache_init(myx_mcl_pool);
	}

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* default tx boundary for the aligned firmware */
	sc->sc_tx_boundary = 4096;

	/* may switch to the unaligned firmware and a 2048 boundary */
	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_qstart = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = MYX_RXBIG_SIZE;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* the real queue length is set in myx_up() once the ring size is known */
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}
536 
/*
 * Decide whether the already-loaded aligned firmware can be kept.  On
 * PCIe links of width x4 or less the aligned firmware always works.
 * On wider links, run the firmware's unaligned DMA read/write tests
 * against a scratch buffer; if any of them fail, load the unaligned
 * firmware instead and shrink the tx boundary to 2048 bytes.
 *
 * Returns 0 on success, 1 on failure.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		/* negotiated link width field of the Link Status register */
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* mc_data2 encodes the transfer size and test direction flags */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	/* the aligned firmware failed; fall back to the unaligned image */
	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}
617 
/*
 * Read len bytes from the card's memory window at off into ptr.  The
 * read barrier orders this access against earlier bus accesses before
 * the raw region read is issued.
 */
void
myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
}
625 
/*
 * Write len bytes from ptr into the card's memory window at off,
 * followed by a write barrier so the data is pushed out before any
 * subsequent bus access.
 */
void
myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_WRITE);
}
633 
/*
 * Allocate, map and load a single-segment DMA buffer of the given
 * size and alignment into mxm.  The memory is zeroed.  On failure the
 * partially constructed state is unwound in reverse order.
 *
 * Returns 0 on success, 1 on failure.
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
664 
/*
 * Tear down a buffer created by myx_dmamem_alloc(), in the reverse
 * order of its construction: unload, unmap, free, destroy.
 */
void
myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
673 
/*
 * Issue a command to the firmware: write the command block to the
 * MYX_CMD window and poll the response buffer in sc_cmddma for up to
 * 20ms (20 x 1ms) until the firmware overwrites the 0xffffffff
 * sentinel with a result code.  On MYXCMD_OK the 32-bit response data
 * is returned via r (if non-NULL).
 *
 * Returns the firmware result code, or 0xffffffff on timeout.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	struct myx_response	*mr;
	u_int			 i;
	u_int32_t		 result, data;

	mc->mc_cmd = htobe32(cmd);
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* prime the response with the "not done yet" sentinel */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): cmd %u completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmd, i, result, data, data);

	if (result == MYXCMD_OK) {
		if (r != NULL)
			*r = data;
	}

	return (result);
}
719 
/*
 * Boot the firmware previously written to SRAM by myx_loadfirmware().
 * The boot command tells the bootloader where the image lives and
 * where to jump; the first 8 bytes of the image are skipped (see
 * bc_copyto/bc_length — presumably the image's header words; confirm
 * against the firmware layout).  The card signals completion by
 * writing 0xffffffff into the command buffer; poll for that for up to
 * 200ms.
 *
 * Returns 0 on success, 1 on timeout.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}
763 
/*
 * Enable or disable the firmware's dummy RDMA engine, pointing it at
 * the pad buffer in sc_paddma.  Completion is signalled by the card
 * writing 0xffffffff into the command buffer; poll for up to 20ms.
 *
 * Returns 0 on success, 1 on timeout.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd	 rc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
	u_int32_t		*status;
	int			 ret = 1;
	u_int			 i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}
814 
/*
 * Media change requests are accepted but have no effect; the media is
 * handled entirely by the firmware.
 */
int
myx_media_change(struct ifnet *ifp)
{
	return (0);
}
821 
/*
 * Report the current media status.  The link state is read out of the
 * firmware-written status block (bracketed by DMA syncs) and pushed
 * through myx_link_state() before the ifmedia flags are filled in.
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	/* no valid status while the interface is down */
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sts = sc->sc_sts->ms_linkstate;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}
851 
852 void
853 myx_link_state(struct myx_softc *sc, u_int32_t sts)
854 {
855 	struct ifnet		*ifp = &sc->sc_ac.ac_if;
856 	int			 link_state = LINK_STATE_DOWN;
857 
858 	if (betoh32(sts) == MYXSTS_LINKUP)
859 		link_state = LINK_STATE_FULL_DUPLEX;
860 	if (ifp->if_link_state != link_state) {
861 		ifp->if_link_state = link_state;
862 		if_link_state_change(ifp);
863 		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
864 		    IF_Gbps(10) : 0;
865 	}
866 }
867 
/*
 * Watchdog hook; intentionally a no-op.
 */
void
myx_watchdog(struct ifnet *ifp)
{
}
873 
/*
 * Interface ioctl handler.  Runs at splnet.  ENETRESET from any of
 * the cases is translated into a multicast/promisc reprogram via
 * myx_iff() when the interface is up and running.
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		/* serialize i2c access; interruptible so userland can bail */
		error = rw_enter(&sc->sc_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = myx_get_sffpage(sc, (struct if_sffpage *)data);
		rw_exit(&sc->sc_sff_lock);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
932 
933 int
934 myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
935 {
936 	struct if_rxring_info ifr[2];
937 
938 	memset(ifr, 0, sizeof(ifr));
939 
940 	ifr[0].ifr_size = MYX_RXSMALL_SIZE;
941 	ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr;
942 
943 	ifr[1].ifr_size = MYX_RXBIG_SIZE;
944 	ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr;
945 
946 	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
947 }
948 
/*
 * Read one byte from i2c device addr at offset off via the firmware.
 * MYXCMD_I2C_READ starts the transfer, then MYXCMD_I2C_BYTE is polled
 * (up to 50 x 1ms) until the firmware stops reporting busy.
 *
 * Returns 0 on success, EIO on a command error, EBUSY if the firmware
 * stayed busy for the whole polling window.
 */
static int
myx_i2c_byte(struct myx_softc *sc, uint8_t addr, uint8_t off, uint8_t *byte)
{
	struct myx_cmd		mc;
	int			result;
	uint32_t		r;
	unsigned int		ms;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(0); /* get 1 byte */
	mc.mc_data1 = htobe32((addr << 8) | off);
	result = myx_cmd(sc, MYXCMD_I2C_READ, &mc, NULL);
	if (result != 0)
		return (EIO);

	for (ms = 0; ms < 50; ms++) {
		memset(&mc, 0, sizeof(mc));
		mc.mc_data0 = htobe32(off);
		result = myx_cmd(sc, MYXCMD_I2C_BYTE, &mc, &r);
		switch (result) {
		case MYXCMD_OK:
			*byte = r;
			return (0);
		case MYXCMD_ERR_BUSY:
			/* transfer still in progress; retry after 1ms */
			break;
		default:
			return (EIO);
		}

		delay(1000);
	}

	return (EBUSY);
}
983 
984 int
985 myx_get_sffpage(struct myx_softc *sc, struct if_sffpage *sff)
986 {
987 	unsigned int		i;
988 	int			result;
989 
990 	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
991 		uint8_t page;
992 
993 		result = myx_i2c_byte(sc, IFSFF_ADDR_EEPROM, 127, &page);
994 		if (result != 0)
995 			return (result);
996 
997 		if (page != sff->sff_page)
998 			return (ENXIO);
999 	}
1000 
1001 	for (i = 0; i < sizeof(sff->sff_data); i++) {
1002 		result = myx_i2c_byte(sc, sff->sff_addr,
1003 		    i, &sff->sff_data[i]);
1004 		if (result != 0)
1005 			return (result);
1006 	}
1007 
1008 	return (0);
1009 }
1010 
1011 void
1012 myx_up(struct myx_softc *sc)
1013 {
1014 	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1015 	struct myx_cmd		mc;
1016 	bus_dmamap_t		map;
1017 	size_t			size;
1018 	u_int			maxpkt;
1019 	u_int32_t		r;
1020 
1021 	memset(&mc, 0, sizeof(mc));
1022 	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1023 		printf("%s: failed to reset the device\n", DEVNAME(sc));
1024 		return;
1025 	}
1026 
1027 	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
1028 	    64, MYXALIGN_CMD) != 0) {
1029 		printf("%s: failed to allocate zero pad memory\n",
1030 		    DEVNAME(sc));
1031 		return;
1032 	}
1033 	memset(sc->sc_zerodma.mxm_kva, 0, 64);
1034 	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1035 	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1036 
1037 	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
1038 	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
1039 		printf("%s: failed to allocate pad DMA memory\n",
1040 		    DEVNAME(sc));
1041 		goto free_zero;
1042 	}
1043 	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1044 	    sc->sc_paddma.mxm_map->dm_mapsize,
1045 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1046 
1047 	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
1048 		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
1049 		goto free_pad;
1050 	}
1051 
1052 	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
1053 		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
1054 		goto free_pad;
1055 	}
1056 	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
1057 
1058 	memset(&mc, 0, sizeof(mc));
1059 	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
1060 		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
1061 		goto free_pad;
1062 	}
1063 	sc->sc_tx_ring_prod = 0;
1064 	sc->sc_tx_ring_cons = 0;
1065 	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1066 	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1067 	sc->sc_tx_count = 0;
1068 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1069 
1070 	/* Allocate Interrupt Queue */
1071 
1072 	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1073 	sc->sc_intrq_idx = 0;
1074 
1075 	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1076 	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1077 	    size, MYXALIGN_DATA) != 0) {
1078 		goto free_pad;
1079 	}
1080 	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1081 	map = sc->sc_intrq_dma.mxm_map;
1082 	memset(sc->sc_intrq, 0, size);
1083 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1084 	    BUS_DMASYNC_PREREAD);
1085 
1086 	memset(&mc, 0, sizeof(mc));
1087 	mc.mc_data0 = htobe32(size);
1088 	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1089 		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1090 		goto free_intrq;
1091 	}
1092 
1093 	memset(&mc, 0, sizeof(mc));
1094 	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1095 	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1096 	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1097 		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1098 		goto free_intrq;
1099 	}
1100 
1101 	/*
1102 	 * get interrupt offsets
1103 	 */
1104 
1105 	memset(&mc, 0, sizeof(mc));
1106 	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1107 	    &sc->sc_irqclaimoff) != 0) {
1108 		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1109 		goto free_intrq;
1110 	}
1111 
1112 	memset(&mc, 0, sizeof(mc));
1113 	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1114 	    &sc->sc_irqdeassertoff) != 0) {
1115 		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1116 		goto free_intrq;
1117 	}
1118 
1119 	memset(&mc, 0, sizeof(mc));
1120 	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1121 	    &sc->sc_irqcoaloff) != 0) {
1122 		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1123 		goto free_intrq;
1124 	}
1125 
1126 	/* Set an appropriate interrupt coalescing period */
1127 	r = htobe32(MYX_IRQCOALDELAY);
1128 	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1129 
1130 	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1131 		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1132 		goto free_intrq;
1133 	}
1134 
1135 	memset(&mc, 0, sizeof(mc));
1136 	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1137 		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1138 		goto free_intrq;
1139 	}
1140 
1141 	memset(&mc, 0, sizeof(mc));
1142 	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1143 		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1144 		goto free_intrq;
1145 	}
1146 
1147 	memset(&mc, 0, sizeof(mc));
1148 	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1149 	    &sc->sc_tx_ring_offset) != 0) {
1150 		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1151 		goto free_intrq;
1152 	}
1153 
1154 	memset(&mc, 0, sizeof(mc));
1155 	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1156 	    &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) {
1157 		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1158 		goto free_intrq;
1159 	}
1160 
1161 	memset(&mc, 0, sizeof(mc));
1162 	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1163 	    &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) {
1164 		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1165 		goto free_intrq;
1166 	}
1167 
1168 	/* Allocate Interrupt Data */
1169 	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1170 	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1171 		printf("%s: failed to allocate status DMA memory\n",
1172 		    DEVNAME(sc));
1173 		goto free_intrq;
1174 	}
1175 	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1176 	map = sc->sc_sts_dma.mxm_map;
1177 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1178 	    BUS_DMASYNC_PREREAD);
1179 
1180 	memset(&mc, 0, sizeof(mc));
1181 	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1182 	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1183 	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1184 	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1185 		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1186 		goto free_sts;
1187 	}
1188 
1189 	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1190 
1191 	memset(&mc, 0, sizeof(mc));
1192 	mc.mc_data0 = htobe32(maxpkt);
1193 	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1194 		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1195 		goto free_sts;
1196 	}
1197 
1198 	if (myx_tx_init(sc, maxpkt) != 0)
1199 		goto free_sts;
1200 
1201 	if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0)
1202 		goto free_tx_ring;
1203 
1204 	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXSMALL]) != 0)
1205 		goto free_rx_ring_small;
1206 
1207 	if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0)
1208 		goto empty_rx_ring_small;
1209 
1210 	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXBIG]) != 0)
1211 		goto free_rx_ring_big;
1212 
1213 	memset(&mc, 0, sizeof(mc));
1214 	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
1215 	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1216 		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1217 		goto empty_rx_ring_big;
1218 	}
1219 
1220 	memset(&mc, 0, sizeof(mc));
1221 	mc.mc_data0 = htobe32(16384);
1222 	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1223 		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1224 		goto empty_rx_ring_big;
1225 	}
1226 
1227 	sc->sc_state = MYX_S_RUNNING;
1228 
1229 	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1230 		printf("%s: failed to start the device\n", DEVNAME(sc));
1231 		goto empty_rx_ring_big;
1232 	}
1233 
1234 	myx_iff(sc);
1235 	SET(ifp->if_flags, IFF_RUNNING);
1236 	ifq_restart(&ifp->if_snd);
1237 
1238 	return;
1239 
1240 empty_rx_ring_big:
1241 	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1242 free_rx_ring_big:
1243 	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1244 empty_rx_ring_small:
1245 	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1246 free_rx_ring_small:
1247 	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1248 free_tx_ring:
1249 	myx_tx_free(sc);
1250 free_sts:
1251 	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1252 	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1253 	myx_dmamem_free(sc, &sc->sc_sts_dma);
1254 free_intrq:
1255 	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1256 	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1257 	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1258 free_pad:
1259 	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1260 	    sc->sc_paddma.mxm_map->dm_mapsize,
1261 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1262 	myx_dmamem_free(sc, &sc->sc_paddma);
1263 
1264 	memset(&mc, 0, sizeof(mc));
1265 	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1266 		printf("%s: failed to reset the device\n", DEVNAME(sc));
1267 	}
1268 free_zero:
1269 	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1270 	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1271 	myx_dmamem_free(sc, &sc->sc_zerodma);
1272 }
1273 
1274 int
1275 myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1276 {
1277 	struct myx_cmd		 mc;
1278 
1279 	memset(&mc, 0, sizeof(mc));
1280 	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1281 	    addr[2] << 8 | addr[3]);
1282 	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1283 
1284 	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1285 		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1286 		return (-1);
1287 	}
1288 	return (0);
1289 }
1290 
1291 void
1292 myx_iff(struct myx_softc *sc)
1293 {
1294 	struct myx_cmd		mc;
1295 	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1296 	struct ether_multi	*enm;
1297 	struct ether_multistep	step;
1298 	u_int8_t *addr;
1299 
1300 	CLR(ifp->if_flags, IFF_ALLMULTI);
1301 
1302 	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1303 	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1304 		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1305 		return;
1306 	}
1307 
1308 	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1309 		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1310 		return;
1311 	}
1312 
1313 	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1314 		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1315 		return;
1316 	}
1317 
1318 	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1319 	    sc->sc_ac.ac_multirangecnt > 0) {
1320 		SET(ifp->if_flags, IFF_ALLMULTI);
1321 		return;
1322 	}
1323 
1324 	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1325 	while (enm != NULL) {
1326 		addr = enm->enm_addrlo;
1327 
1328 		memset(&mc, 0, sizeof(mc));
1329 		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1330 		    addr[2] << 8 | addr[3]);
1331 		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1332 		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1333 			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1334 			return;
1335 		}
1336 
1337 		ETHER_NEXT_MULTI(step, enm);
1338 	}
1339 
1340 	memset(&mc, 0, sizeof(mc));
1341 	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1342 		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1343 		return;
1344 	}
1345 }
1346 
/*
 * Bring the interface down: ask the firmware to stop, wait for the
 * interrupt handler to acknowledge the transition, then tear down all
 * rings and DMA memory allocated by myx_up().
 */
void
myx_down(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	struct sleep_state	 sls;
	struct myx_cmd		 mc;
	int			 s;
	int			 ring;

	CLR(ifp->if_flags, IFF_RUNNING);

	/*
	 * Snapshot the firmware's link-down counter from the status
	 * block; myx_intr() compares sc_linkdown against the live value
	 * to recognise the IFDOWN completion.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sc->sc_linkdown = sts->ms_linkdown;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* publish the DOWN state before issuing the command */
	sc->sc_state = MYX_S_DOWN;
	membar_producer();

	memset(&mc, 0, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	/* sleep until myx_intr() moves the state machine to OFF */
	while (sc->sc_state != MYX_S_OFF) {
		sleep_setup(&sls, sts, PWAIT, "myxdown");
		membar_consumer();
		sleep_finish(&sls, sc->sc_state != MYX_S_OFF);
	}

	/* report the link as gone */
	s = splnet();
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	/* make sure no transmit work is still running before freeing rings */
	ifq_clr_oactive(&ifp->if_snd);
	ifq_barrier(&ifp->if_snd);

	for (ring = 0; ring < 2; ring++) {
		struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];

		timeout_del(&mrr->mrr_refill);
		myx_rx_empty(sc, mrr);
		myx_rx_free(sc, mrr);
	}

	myx_tx_empty(sc);
	myx_tx_free(sc);

	/* the sleep shizz above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1420 
1421 void
1422 myx_write_txd_tail(struct myx_softc *sc, struct myx_slot *ms, u_int8_t flags,
1423     u_int32_t offset, u_int idx)
1424 {
1425 	struct myx_tx_desc		txd;
1426 	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
1427 	bus_dmamap_t			map = ms->ms_map;
1428 	int				i;
1429 
1430 	for (i = 1; i < map->dm_nsegs; i++) {
1431 		memset(&txd, 0, sizeof(txd));
1432 		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
1433 		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
1434 		txd.tx_flags = flags;
1435 
1436 		myx_bus_space_write(sc,
1437 		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1438 		    &txd, sizeof(txd));
1439 	}
1440 
1441 	/* pad runt frames */
1442 	if (map->dm_mapsize < 60) {
1443 		memset(&txd, 0, sizeof(txd));
1444 		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
1445 		txd.tx_length = htobe16(60 - map->dm_mapsize);
1446 		txd.tx_flags = flags;
1447 
1448 		myx_bus_space_write(sc,
1449 		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1450 		    &txd, sizeof(txd));
1451 	}
1452 }
1453 
/*
 * Transmit start routine. Packets are dequeued and DMA-loaded first,
 * then their descriptors are written in a second pass so that the very
 * first descriptor of the batch — the one the NIC is watching — is
 * made valid only after all the others are visible.
 */
void
myx_start(struct ifqueue *ifq)
{
	struct ifnet			*ifp = ifq->ifq_if;
	struct myx_tx_desc		txd;
	struct myx_softc		*sc = ifp->if_softc;
	struct myx_slot			*ms;
	bus_dmamap_t			map;
	struct mbuf			*m;
	u_int32_t			offset = sc->sc_tx_ring_offset;
	u_int				idx, cons, prod;
	u_int				free, used;
	u_int8_t			flags;

	idx = sc->sc_tx_ring_prod;

	/* figure out space */
	free = sc->sc_tx_ring_cons;
	if (free <= idx)
		free += sc->sc_tx_ring_count;
	free -= idx;

	/* cons remembers the first slot claimed in this batch */
	cons = prod = sc->sc_tx_prod;

	used = 0;

	for (;;) {
		/* worst case per packet: sc_tx_nsegs segments + runt pad */
		if (used + sc->sc_tx_nsegs + 1 > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		ms = &sc->sc_tx_slots[prod];

		if (myx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* account for the descriptors this packet will consume */
		used += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		if (++prod >= sc->sc_tx_ring_count)
			prod = 0;
	}

	/* nothing was queued */
	if (cons == prod)
		return;

	ms = &sc->sc_tx_slots[cons];

	/* post descriptors for every packet except the first */
	for (;;) {
		idx += ms->ms_map->dm_nsegs +
		    (ms->ms_map->dm_mapsize < 60 ? 1 : 0);
		if (idx >= sc->sc_tx_ring_count)
			idx -= sc->sc_tx_ring_count;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;

		if (cons == prod)
			break;

		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, ms, flags, offset, idx);
	}

	/* go back and post first packet */
	ms = &sc->sc_tx_slots[sc->sc_tx_prod];
	map = ms->ms_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	memset(&txd, 0, sizeof(txd));
	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, ms, flags, offset, sc->sc_tx_ring_prod);

	/* write all of the first descriptor except its final word... */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* ...then the final word, which hands the batch to the NIC */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * (sc->sc_tx_ring_prod + 1) -
	    sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);

	/* commit */
	sc->sc_tx_ring_prod = idx;
	sc->sc_tx_prod = prod;
}
1586 
/*
 * Load an mbuf chain into the slot's DMA map, defragmenting the chain
 * once if it has more segments than the map allows.  Returns 0 on
 * success, 1 on failure (caller frees the mbuf).
 */
int
myx_load_mbuf(struct myx_softc *sc, struct myx_slot *ms, struct mbuf *m)
{
	bus_dma_tag_t			dmat = sc->sc_dmat;
	bus_dmamap_t			dmap = ms->ms_map;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(dmat, dmap, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return (1);
	}

	ms->ms_m = m;
	return (0);
}
1610 
/*
 * Interrupt handler.  Reads the DMA'd status block, completes tx and
 * rx work, and drives the DOWN -> OFF state transition for myx_down().
 */
int
myx_intr(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state		 state;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 data;
	u_int8_t		 valid = 0;

	state = sc->sc_state;
	if (state == MYX_S_OFF)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/* ms_isvalid == 0 means the firmware has no new status for us */
	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		return (0);
	}

	/* legacy INTx needs an explicit deassert write */
	if (sc->sc_intx) {
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/*
	 * Re-read the tx done count until the firmware stops updating
	 * the status block underneath us.
	 */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	/* claim the interrupt (both words of the claim register) */
	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	if (sts->ms_statusupdated) {
		/*
		 * A link-down counter change while in the DOWN state is
		 * the firmware's IFDOWN completion; wake up myx_down().
		 */
		if (state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown) {
			sc->sc_state = MYX_S_OFF;
			membar_producer();
			wakeup(sts);
		} else {
			data = sts->ms_linkstate;
			if (data != 0xffffffff) {
				KERNEL_LOCK();
				myx_link_state(sc, data);
				KERNEL_UNLOCK();
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (1);
}
1685 
1686 void
1687 myx_refill(void *xmrr)
1688 {
1689 	struct myx_rx_ring *mrr = xmrr;
1690 	struct myx_softc *sc = mrr->mrr_softc;
1691 
1692 	myx_rx_fill(sc, mrr);
1693 
1694 	if (mrr->mrr_prod == mrr->mrr_cons)
1695 		timeout_add(&mrr->mrr_refill, 1);
1696 }
1697 
/*
 * Transmit completion: free sent mbufs until the driver's completed
 * packet counter (sc_tx_count) catches up with the firmware's
 * done_count, then advance the ring and slot consumer indexes.
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_slot *ms;
	bus_dmamap_t map;
	u_int idx, cons;

	idx = sc->sc_tx_ring_cons;
	cons = sc->sc_tx_cons;

	do {
		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		/* each packet used nsegs descriptors, plus one if runt-padded */
		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(ms->ms_m);

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;
	} while (++sc->sc_tx_count != done_count);

	if (idx >= sc->sc_tx_ring_count)
		idx -= sc->sc_tx_ring_count;

	sc->sc_tx_ring_cons = idx;
	sc->sc_tx_cons = cons;

	/* descriptors were freed; let the stack queue more packets */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
1733 
/*
 * Receive completion: drain the interrupt queue of length entries,
 * hand the corresponding mbufs to the network stack, then return the
 * freed slots to the rx rings and refill them.
 */
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_rx_ring *mrr;
	struct myx_slot *ms;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;
	int livelocked;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/* a non-zero length marks a valid intrq entry */
	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		/* clear the entry so it reads as empty next pass */
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* the packet length tells us which ring it came from */
		ring = (len <= (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mrr = &sc->sc_rx_ring[ring];
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

		m = ms->ms_m;
		/* hardware leaves room to align the IP header */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* give the slots back and replenish each ring we took from */
	livelocked = ifiq_input(&ifp->if_rcv, &ml);
	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mrr = &sc->sc_rx_ring[ring];

		if (livelocked)
			if_rxr_livelocked(&mrr->mrr_rxr);

		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
		myx_rx_fill(sc, mrr);
		/* couldn't refill at all: retry from a timeout */
		if (mrr->mrr_prod == mrr->mrr_cons)
			timeout_add(&mrr->mrr_refill, 0);
	}
}
1798 
/*
 * Fill up to "slots" rx slots with fresh mbufs and post their
 * descriptors.  The first slot's descriptor is deliberately written
 * last, after a write barrier, so the hardware only sees the batch
 * once every other descriptor is in place.  Returns the number of
 * slots that could not be filled.
 */
static int
myx_rx_fill_slots(struct myx_softc *sc, struct myx_rx_ring *mrr, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	u_int p, first, fills;

	first = p = mrr->mrr_prod;
	if (myx_buf_fill(sc, &mrr->mrr_slots[first], mrr->mrr_mclget) != 0)
		return (slots);

	if (++p >= sc->sc_rx_ring_count)
		p = 0;

	for (fills = 1; fills < slots; fills++) {
		ms = &mrr->mrr_slots[p];

		if (myx_buf_fill(sc, ms, mrr->mrr_mclget) != 0)
			break;

		rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc, offset + p * sizeof(rxd),
		    &rxd, sizeof(rxd));

		if (++p >= sc->sc_rx_ring_count)
			p = 0;
	}

	mrr->mrr_prod = p;

	/* make sure the first descriptor is seen after the others */
	if (fills > 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	ms = &mrr->mrr_slots[first];
	rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
	myx_bus_space_write(sc, offset + first * sizeof(rxd),
	    &rxd, sizeof(rxd));

	return (slots - fills);
}
1844 
1845 int
1846 myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
1847 {
1848 	struct myx_rx_desc rxd;
1849 	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
1850 	struct myx_slot *ms;
1851 	u_int32_t offset = mrr->mrr_offset;
1852 	int rv;
1853 	int i;
1854 
1855 	mrr->mrr_slots = mallocarray(sizeof(*ms), sc->sc_rx_ring_count,
1856 	    M_DEVBUF, M_WAITOK);
1857 	if (mrr->mrr_slots == NULL)
1858 		return (ENOMEM);
1859 
1860 	memset(&rxd, 0xff, sizeof(rxd));
1861 	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1862 		ms = &mrr->mrr_slots[i];
1863 		rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1864 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
1865 		if (rv != 0)
1866 			goto destroy;
1867 
1868 		myx_bus_space_write(sc, offset + i * sizeof(rxd),
1869 		    &rxd, sizeof(rxd));
1870 	}
1871 
1872 	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1873 	mrr->mrr_prod = mrr->mrr_cons = 0;
1874 
1875 	return (0);
1876 
1877 destroy:
1878 	while (i-- > 0) {
1879 		ms = &mrr->mrr_slots[i];
1880 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1881 	}
1882 	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1883 	return (rv);
1884 }
1885 
1886 int
1887 myx_rx_fill(struct myx_softc *sc, struct myx_rx_ring *mrr)
1888 {
1889 	u_int slots;
1890 
1891 	slots = if_rxr_get(&mrr->mrr_rxr, sc->sc_rx_ring_count);
1892 	if (slots == 0)
1893 		return (1);
1894 
1895 	slots = myx_rx_fill_slots(sc, mrr, slots);
1896 	if (slots > 0)
1897 		if_rxr_put(&mrr->mrr_rxr, slots);
1898 
1899 	return (0);
1900 }
1901 
1902 void
1903 myx_rx_empty(struct myx_softc *sc, struct myx_rx_ring *mrr)
1904 {
1905 	struct myx_slot *ms;
1906 
1907 	while (mrr->mrr_cons != mrr->mrr_prod) {
1908 		ms = &mrr->mrr_slots[mrr->mrr_cons];
1909 
1910 		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
1911 			mrr->mrr_cons = 0;
1912 
1913 		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1914 		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1915 		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
1916 		m_freem(ms->ms_m);
1917 	}
1918 
1919 	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1920 }
1921 
1922 void
1923 myx_rx_free(struct myx_softc *sc, struct myx_rx_ring *mrr)
1924 {
1925 	struct myx_slot *ms;
1926 	int i;
1927 
1928 	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1929 		ms = &mrr->mrr_slots[i];
1930 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1931 	}
1932 
1933 	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1934 }
1935 
1936 struct mbuf *
1937 myx_mcl_small(void)
1938 {
1939 	struct mbuf *m;
1940 
1941 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MYX_RXSMALL_SIZE);
1942 	if (m == NULL)
1943 		return (NULL);
1944 
1945 	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;
1946 
1947 	return (m);
1948 }
1949 
1950 struct mbuf *
1951 myx_mcl_big(void)
1952 {
1953 	struct mbuf *m;
1954 	void *mcl;
1955 
1956 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1957 	if (m == NULL)
1958 		return (NULL);
1959 
1960 	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
1961 	if (mcl == NULL) {
1962 		m_free(m);
1963 		return (NULL);
1964 	}
1965 
1966 	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, MEXTFREE_POOL, myx_mcl_pool);
1967 	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;
1968 
1969 	return (m);
1970 }
1971 
1972 int
1973 myx_buf_fill(struct myx_softc *sc, struct myx_slot *ms,
1974     struct mbuf *(*mclget)(void))
1975 {
1976 	struct mbuf *m;
1977 	int rv;
1978 
1979 	m = (*mclget)();
1980 	if (m == NULL)
1981 		return (ENOMEM);
1982 
1983 	rv = bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m, BUS_DMA_NOWAIT);
1984 	if (rv != 0) {
1985 		m_freem(m);
1986 		return (rv);
1987 	}
1988 
1989 	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1990 	    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1991 
1992 	ms->ms_m = m;
1993 
1994 	return (0);
1995 }
1996 
1997 int
1998 myx_tx_init(struct myx_softc *sc, bus_size_t size)
1999 {
2000 	struct myx_slot *ms;
2001 	int rv;
2002 	int i;
2003 
2004 	sc->sc_tx_slots = mallocarray(sizeof(*ms), sc->sc_tx_ring_count,
2005 	    M_DEVBUF, M_WAITOK);
2006 	if (sc->sc_tx_slots == NULL)
2007 		return (ENOMEM);
2008 
2009 	for (i = 0; i < sc->sc_tx_ring_count; i++) {
2010 		ms = &sc->sc_tx_slots[i];
2011 		rv = bus_dmamap_create(sc->sc_dmat, size, sc->sc_tx_nsegs,
2012 		    sc->sc_tx_boundary, sc->sc_tx_boundary,
2013 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
2014 		if (rv != 0)
2015 			goto destroy;
2016 	}
2017 
2018 	sc->sc_tx_prod = sc->sc_tx_cons = 0;
2019 
2020 	return (0);
2021 
2022 destroy:
2023 	while (i-- > 0) {
2024 		ms = &sc->sc_tx_slots[i];
2025 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
2026 	}
2027 	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
2028 	return (rv);
2029 }
2030 
2031 void
2032 myx_tx_empty(struct myx_softc *sc)
2033 {
2034 	struct myx_slot *ms;
2035 	u_int cons = sc->sc_tx_cons;
2036 	u_int prod = sc->sc_tx_prod;
2037 
2038 	while (cons != prod) {
2039 		ms = &sc->sc_tx_slots[cons];
2040 
2041 		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
2042 		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2043 		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
2044 		m_freem(ms->ms_m);
2045 
2046 		if (++cons >= sc->sc_tx_ring_count)
2047 			cons = 0;
2048 	}
2049 
2050 	sc->sc_tx_cons = cons;
2051 }
2052 
2053 void
2054 myx_tx_free(struct myx_softc *sc)
2055 {
2056 	struct myx_slot *ms;
2057 	int i;
2058 
2059 	for (i = 0; i < sc->sc_tx_ring_count; i++) {
2060 		ms = &sc->sc_tx_slots[i];
2061 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
2062 	}
2063 
2064 	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
2065 }
2066