1 /*	$OpenBSD: gem.c,v 1.120 2016/04/13 10:49:26 mpi Exp $	*/
2 /*	$NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $ */
3 
4 /*
5  *
6  * Copyright (C) 2001 Eduardo Horvath.
7  * All rights reserved.
8  *
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  */
32 
33 /*
34  * Driver for Sun GEM ethernet controllers.
35  */
36 
37 #include "bpfilter.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/timeout.h>
42 #include <sys/mbuf.h>
43 #include <sys/syslog.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/socket.h>
47 #include <sys/ioctl.h>
48 #include <sys/errno.h>
49 #include <sys/device.h>
50 #include <sys/endian.h>
51 #include <sys/atomic.h>
52 
53 #include <net/if.h>
54 #include <net/if_media.h>
55 
56 #include <netinet/in.h>
57 #include <netinet/if_ether.h>
58 
59 #if NBPFILTER > 0
60 #include <net/bpf.h>
61 #endif
62 
63 #include <machine/bus.h>
64 #include <machine/intr.h>
65 
66 #include <dev/mii/mii.h>
67 #include <dev/mii/miivar.h>
68 
69 #include <dev/ic/gemreg.h>
70 #include <dev/ic/gemvar.h>
71 
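/* Polling budget for gem_bitwait(): up to 10000 polls 100us apart (~1s). */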
72 #define TRIES	10000
73 
74 struct cfdriver gem_cd = {
75 	NULL, "gem", DV_IFNET
76 };
77 
78 void		gem_start(struct ifnet *);
79 void		gem_stop(struct ifnet *, int);
80 int		gem_ioctl(struct ifnet *, u_long, caddr_t);
81 void		gem_tick(void *);
82 void		gem_watchdog(struct ifnet *);
83 int		gem_init(struct ifnet *);
84 void		gem_init_regs(struct gem_softc *);
85 int		gem_ringsize(int);
86 int		gem_meminit(struct gem_softc *);
87 void		gem_mifinit(struct gem_softc *);
88 int		gem_bitwait(struct gem_softc *, bus_space_handle_t, int,
89 		    u_int32_t, u_int32_t);
90 void		gem_reset(struct gem_softc *);
91 int		gem_reset_rx(struct gem_softc *);
92 int		gem_reset_tx(struct gem_softc *);
93 int		gem_disable_rx(struct gem_softc *);
94 int		gem_disable_tx(struct gem_softc *);
95 void		gem_rx_watchdog(void *);
96 void		gem_rxdrain(struct gem_softc *);
97 void		gem_fill_rx_ring(struct gem_softc *);
98 int		gem_add_rxbuf(struct gem_softc *, int idx);
99 int		gem_load_mbuf(struct gem_softc *, struct gem_sxd *,
100 		    struct mbuf *);
101 void		gem_iff(struct gem_softc *);
102 
103 /* MII methods & callbacks */
104 int		gem_mii_readreg(struct device *, int, int);
105 void		gem_mii_writereg(struct device *, int, int, int);
106 void		gem_mii_statchg(struct device *);
107 int		gem_pcs_readreg(struct device *, int, int);
108 void		gem_pcs_writereg(struct device *, int, int, int);
109 
110 int		gem_mediachange(struct ifnet *);
111 void		gem_mediastatus(struct ifnet *, struct ifmediareq *);
112 
113 int		gem_eint(struct gem_softc *, u_int);
114 int		gem_rint(struct gem_softc *);
115 int		gem_tint(struct gem_softc *, u_int32_t);
116 int		gem_pint(struct gem_softc *);
117 
118 #ifdef GEM_DEBUG
119 #define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
120 				printf x
121 #else
122 #define	DPRINTF(sc, x)	/* nothing */
123 #endif
124 
125 /*
126  * Attach a Gem interface to the system.
127  */
128 void
129 gem_config(struct gem_softc *sc)
130 {
131 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
132 	struct mii_data *mii = &sc->sc_mii;
133 	struct mii_softc *child;
134 	int i, error, mii_flags, phyad;
135 	struct ifmedia_entry *ifm;
136 
137 	ifp->if_softc = sc;
138 	/* Make sure the chip is stopped. */
139 	gem_reset(sc);
140 
141 	/*
142 	 * Allocate the control data structures, and create and load the
143 	 * DMA map for it.
144 	 */
145 	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
146 	    sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
147 	    1, &sc->sc_cdnseg, 0)) != 0) {
148 		printf("\n%s: unable to allocate control data, error = %d\n",
149 		    sc->sc_dev.dv_xname, error);
150 		goto fail_0;
151 	}
152 
153 	/* XXX should map this in with correct endianness */
154 	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
155 	    sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
156 	    BUS_DMA_COHERENT)) != 0) {
157 		printf("\n%s: unable to map control data, error = %d\n",
158 		    sc->sc_dev.dv_xname, error);
159 		goto fail_1;
160 	}
161 
162 	if ((error = bus_dmamap_create(sc->sc_dmatag,
163 	    sizeof(struct gem_control_data), 1,
164 	    sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
165 		printf("\n%s: unable to create control data DMA map, "
166 		    "error = %d\n", sc->sc_dev.dv_xname, error);
167 		goto fail_2;
168 	}
169 
170 	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
171 	    sc->sc_control_data, sizeof(struct gem_control_data), NULL,
172 	    0)) != 0) {
173 		printf("\n%s: unable to load control data DMA map, error = %d\n",
174 		    sc->sc_dev.dv_xname, error);
175 		goto fail_3;
176 	}
177 
178 	/*
179 	 * Create the receive buffer DMA maps.
180 	 */
181 	for (i = 0; i < GEM_NRXDESC; i++) {
182 		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
183 		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
184 			printf("\n%s: unable to create rx DMA map %d, "
185 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
186 			goto fail_5;
187 		}
188 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
189 	}
190 	/*
191 	 * Create the transmit buffer DMA maps.
192 	 */
193 	for (i = 0; i < GEM_NTXDESC; i++) {
194 		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
195 		    GEM_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
196 		    &sc->sc_txd[i].sd_map)) != 0) {
197 			printf("\n%s: unable to create tx DMA map %d, "
198 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
199 			goto fail_6;
200 		}
201 		sc->sc_txd[i].sd_mbuf = NULL;
202 	}
203 
204 	/*
205 	 * From this point forward, the attachment cannot fail.  A failure
206 	 * before this point releases all resources that may have been
207 	 * allocated.
208 	 */
209 
210 	/* Announce ourselves. */
211 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
212 
213 	/* Get the RX FIFO size; the register counts in 64-byte units. */
214 	sc->sc_rxfifosize = 64 *
215 	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_FIFO_SIZE);
216 
217 	/* Initialize ifnet structure. */
218 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
219 	ifp->if_softc = sc;
220 	ifp->if_flags =
221 	    IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
222 	ifp->if_start = gem_start;
223 	ifp->if_ioctl = gem_ioctl;
224 	ifp->if_watchdog = gem_watchdog;
225 	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_NTXDESC - 1);
226 
227 	ifp->if_capabilities = IFCAP_VLAN_MTU;
228 
229 	/* Initialize ifmedia structures and MII info */
230 	mii->mii_ifp = ifp;
231 	mii->mii_readreg = gem_mii_readreg;
232 	mii->mii_writereg = gem_mii_writereg;
233 	mii->mii_statchg = gem_mii_statchg;
234 
235 	ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus);
236 
237 	/* Bad things will happen if we touch this register on ERI. */
238 	if (sc->sc_variant != GEM_SUN_ERI)
239 		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
240 		    GEM_MII_DATAPATH_MODE, 0);
241 
242 	gem_mifinit(sc);
243 
244 	mii_flags = MIIF_DOPAUSE;
245 
246 	/*
247 	 * Look for an external PHY.
248 	 */
249 	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
250 		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
251 		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
252 	    GEM_MIF_CONFIG, sc->sc_mif_config);
253 
254 		switch (sc->sc_variant) {
255 		case GEM_SUN_ERI:
256 			phyad = GEM_PHYAD_EXTERNAL;
257 			break;
258 		default:
259 			phyad = MII_PHY_ANY;
260 			break;
261 		}
262 
263 		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
264 		    MII_OFFSET_ANY, mii_flags);
265 	}
266 
267 	/*
268 	 * Fall back on an internal PHY if no external PHY was found.
269 	 * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be
270 	 * trusted when the firmware has powered down the chip.
271 	 */
272 	child = LIST_FIRST(&mii->mii_phys);
273 	if (child == NULL &&
274 	    (sc->sc_mif_config & GEM_MIF_CONFIG_MDI0 || GEM_IS_APPLE(sc))) {
275 		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
276 		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
277 	    GEM_MIF_CONFIG, sc->sc_mif_config);
278 
279 		switch (sc->sc_variant) {
280 		case GEM_SUN_ERI:
281 		case GEM_APPLE_K2_GMAC:
282 			phyad = GEM_PHYAD_INTERNAL;
283 			break;
284 		case GEM_APPLE_GMAC:
285 			phyad = GEM_PHYAD_EXTERNAL;
286 			break;
287 		default:
288 			phyad = MII_PHY_ANY;
289 			break;
290 		}
291 
292 		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
293 		    MII_OFFSET_ANY, mii_flags);
294 	}
295 
296 	/*
297 	 * Try the external PCS SERDES if we didn't find any MII
298 	 * devices.
299 	 */
300 	child = LIST_FIRST(&mii->mii_phys);
301 	if (child == NULL && sc->sc_variant != GEM_SUN_ERI) {
302 		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
303 		    GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_SERDES);
304 
305 		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
306 		    GEM_MII_SLINK_CONTROL,
307 		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);
308 
309 		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
310 		    GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
311 
312 		mii->mii_readreg = gem_pcs_readreg;
313 		mii->mii_writereg = gem_pcs_writereg;
314 
315 		mii_flags |= MIIF_NOISOLATE;
316 
317 		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
318 		    MII_OFFSET_ANY, mii_flags);
319 	}
320 
321 	child = LIST_FIRST(&mii->mii_phys);
322 	if (child == NULL) {
323 		/* No PHY attached */
324 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
325 		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
326 	} else {
327 		/*
328 		 * XXX - we can really do the following ONLY if the
329 		 * phy indeed has the auto negotiation capability!!
330 		 */
331 		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
332 	}
333 
334 	/* Check if we support GigE media. */
335 	TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) {
336 		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
337 		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
338 		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
339 		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
340 			sc->sc_flags |= GEM_GIGABIT;
341 			break;
342 		}
343 	}
344 
345 	/* Attach the interface. */
346 	if_attach(ifp);
347 	ether_ifattach(ifp);
348 
349 	timeout_set(&sc->sc_tick_ch, gem_tick, sc);
350 	timeout_set(&sc->sc_rx_watchdog, gem_rx_watchdog, sc);
351 	return;
352 
353 	/*
354 	 * Free any resources we've allocated during the failed attach
355 	 * attempt.  Do this in reverse order and fall through.
356 	 */
357  fail_6:
358 	for (i = 0; i < GEM_NTXDESC; i++) {
359 		if (sc->sc_txd[i].sd_map != NULL)
360 			bus_dmamap_destroy(sc->sc_dmatag,
361 			    sc->sc_txd[i].sd_map);
362 	}
363  fail_5:
364 	for (i = 0; i < GEM_NRXDESC; i++) {
365 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
366 			bus_dmamap_destroy(sc->sc_dmatag,
367 			    sc->sc_rxsoft[i].rxs_dmamap);
368 	}
369 	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
370  fail_3:
371 	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
372  fail_2:
373 	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
374 	    sizeof(struct gem_control_data));
375  fail_1:
376 	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
377  fail_0:
378 	return;
379 }
380 
381 void
382 gem_unconfig(struct gem_softc *sc)
383 {
384 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
385 	int i;
386 
387 	gem_stop(ifp, 1);
388 
389 	for (i = 0; i < GEM_NTXDESC; i++) {
390 		if (sc->sc_txd[i].sd_map != NULL)
391 			bus_dmamap_destroy(sc->sc_dmatag,
392 			    sc->sc_txd[i].sd_map);
393 	}
394 	for (i = 0; i < GEM_NRXDESC; i++) {
395 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
396 			bus_dmamap_destroy(sc->sc_dmatag,
397 			    sc->sc_rxsoft[i].rxs_dmamap);
398 	}
399 	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
400 	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
401 	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
402 	    sizeof(struct gem_control_data));
403 	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
404 
405 	/* Detach all PHYs */
406 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
407 
408 	/* Delete all remaining media. */
409 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
410 
411 	ether_ifdetach(ifp);
412 	if_detach(ifp);
413 }
414 
415 
416 void
417 gem_tick(void *arg)
418 {
419 	struct gem_softc *sc = arg;
420 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
421 	bus_space_tag_t t = sc->sc_bustag;
422 	bus_space_handle_t mac = sc->sc_h1;
423 	int s;
424 	u_int32_t v;
425 
426 	/* unload collision counters */
427 	v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
428 	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
429 	ifp->if_collisions += v +
430 	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
431 	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT);
432 	ifp->if_oerrors += v;
433 
434 	/* read error counters */
435 	ifp->if_ierrors +=
436 	    bus_space_read_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT) +
437 	    bus_space_read_4(t, mac, GEM_MAC_RX_ALIGN_ERR) +
438 	    bus_space_read_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT) +
439 	    bus_space_read_4(t, mac, GEM_MAC_RX_CODE_VIOL);
440 
441 	/* clear the hardware counters */
442 	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
443 	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
444 	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
445 	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
446 	bus_space_write_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT, 0);
447 	bus_space_write_4(t, mac, GEM_MAC_RX_ALIGN_ERR, 0);
448 	bus_space_write_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT, 0);
449 	bus_space_write_4(t, mac, GEM_MAC_RX_CODE_VIOL, 0);
450 
451 	s = splnet();
452 	mii_tick(&sc->sc_mii);
453 	splx(s);
454 
455 	timeout_add_sec(&sc->sc_tick_ch, 1);
456 }
457 
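/*
 * Poll register r until the bits in clr are all clear and the bits in
 * set are all set, or until TRIES polls (100us apart) are exhausted.
 * Returns nonzero on success, zero on timeout.
 */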
458 int
459 gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r,
460    u_int32_t clr, u_int32_t set)
461 {
462 	int i;
463 	u_int32_t reg;
464 
465 	for (i = TRIES; i--; DELAY(100)) {
466 		reg = bus_space_read_4(sc->sc_bustag, h, r);
467 		if ((reg & clr) == 0 && (reg & set) == set)
468 			return (1);
469 	}
470 
471 	return (0);
472 }
473 
474 void
475 gem_reset(struct gem_softc *sc)
476 {
477 	bus_space_tag_t t = sc->sc_bustag;
478 	bus_space_handle_t h = sc->sc_h2;
479 	int s;
480 
481 	s = splnet();
482 	DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
483 	gem_reset_rx(sc);
484 	gem_reset_tx(sc);
485 
486 	/* Do a full reset */
487 	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
488 	if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
489 		printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
490 	splx(s);
491 }
492 
493 
494 /*
495  * Drain the receive queue.
496  */
497 void
498 gem_rxdrain(struct gem_softc *sc)
499 {
500 	struct gem_rxsoft *rxs;
501 	int i;
502 
503 	for (i = 0; i < GEM_NRXDESC; i++) {
504 		rxs = &sc->sc_rxsoft[i];
505 		if (rxs->rxs_mbuf != NULL) {
506 			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
507 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
508 			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
509 			m_freem(rxs->rxs_mbuf);
510 			rxs->rxs_mbuf = NULL;
511 		}
512 	}
513 	sc->sc_rx_prod = sc->sc_rx_cons = 0;
514 }
515 
516 /*
517  * Stop the interface and release any queued transmit/receive buffers.
518  */
519 void
520 gem_stop(struct ifnet *ifp, int softonly)
521 {
522 	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
523 	struct gem_sxd *sd;
524 	u_int32_t i;
525 
526 	DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));
527 
528 	timeout_del(&sc->sc_tick_ch);
529 
530 	/*
531 	 * Mark the interface down and cancel the watchdog timer.
532 	 */
533 	ifp->if_flags &= ~IFF_RUNNING;
534 	ifq_clr_oactive(&ifp->if_snd);
535 	ifp->if_timer = 0;
536 
537 	if (!softonly) {
538 		mii_down(&sc->sc_mii);
539 
540 		gem_reset_rx(sc);
541 		gem_reset_tx(sc);
542 	}
543 
544 	intr_barrier(sc->sc_ih);
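	/* Wait for any interrupt handler still running to finish. */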
545 
546 	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
547 
548 	/*
549 	 * Release any queued transmit buffers.
550 	 */
551 	for (i = 0; i < GEM_NTXDESC; i++) {
552 		sd = &sc->sc_txd[i];
553 		if (sd->sd_mbuf != NULL) {
554 			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
555 			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
556 			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
557 			m_freem(sd->sd_mbuf);
558 			sd->sd_mbuf = NULL;
559 		}
560 	}
561 	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;
562 
563 	gem_rxdrain(sc);
564 }
565 
566 
567 /*
568  * Reset the receiver
569  */
570 int
571 gem_reset_rx(struct gem_softc *sc)
572 {
573 	bus_space_tag_t t = sc->sc_bustag;
574 	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;
575 
576 	/*
577 	 * Resetting while DMA is in progress can cause a bus hang, so we
578 	 * disable DMA first.
579 	 */
580 	gem_disable_rx(sc);
581 	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
582 	/* Wait till it finishes */
583 	if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0))
584 		printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname);
585 	/* Wait 5ms extra. */
586 	delay(5000);
587 
588 	/* Finally, reset the ERX */
589 	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX);
590 	/* Wait till it finishes */
591 	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) {
592 		printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
593 		return (1);
594 	}
595 	return (0);
596 }
597 
598 
599 /*
600  * Reset the transmitter
601  */
602 int
603 gem_reset_tx(struct gem_softc *sc)
604 {
605 	bus_space_tag_t t = sc->sc_bustag;
606 	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;
607 
608 	/*
609 	 * Resetting while DMA is in progress can cause a bus hang, so we
610 	 * disable DMA first.
611 	 */
612 	gem_disable_tx(sc);
613 	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
614 	/* Wait till it finishes */
615 	if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0))
616 		printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname);
617 	/* Wait 5ms extra. */
618 	delay(5000);
619 
620 	/* Finally, reset the ETX */
621 	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX);
622 	/* Wait till it finishes */
623 	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) {
624 		printf("%s: cannot reset transmitter\n",
625 			sc->sc_dev.dv_xname);
626 		return (1);
627 	}
628 	return (0);
629 }
630 
631 /*
632  * Disable receiver.
633  */
634 int
635 gem_disable_rx(struct gem_softc *sc)
636 {
637 	bus_space_tag_t t = sc->sc_bustag;
638 	bus_space_handle_t h = sc->sc_h1;
639 	u_int32_t cfg;
640 
641 	/* Clear the enable bit */
642 	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
643 	cfg &= ~GEM_MAC_RX_ENABLE;
644 	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);
645 
646 	/* Wait for it to finish */
647 	return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
648 }
649 
650 /*
651  * Disable transmitter.
652  */
653 int
654 gem_disable_tx(struct gem_softc *sc)
655 {
656 	bus_space_tag_t t = sc->sc_bustag;
657 	bus_space_handle_t h = sc->sc_h1;
658 	u_int32_t cfg;
659 
660 	/* Clear the enable bit */
661 	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
662 	cfg &= ~GEM_MAC_TX_ENABLE;
663 	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);
664 
665 	/* Wait for it to finish */
666 	return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
667 }
668 
669 /*
670  * Initialize the transmit and receive descriptor rings.
671  */
672 int
673 gem_meminit(struct gem_softc *sc)
674 {
675 	int i;
676 
677 	/*
678 	 * Initialize the transmit descriptor ring.
679 	 */
680 	for (i = 0; i < GEM_NTXDESC; i++) {
681 		sc->sc_txdescs[i].gd_flags = 0;
682 		sc->sc_txdescs[i].gd_addr = 0;
683 	}
684 	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
685 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
686 
687 	/*
688 	 * Initialize the receive descriptor and receive job
689 	 * descriptor rings.
690 	 */
691 	for (i = 0; i < GEM_NRXDESC; i++) {
692 		sc->sc_rxdescs[i].gd_flags = 0;
693 		sc->sc_rxdescs[i].gd_addr = 0;
694 	}
695 	/* Hardware reads RX descriptors in multiples of four. */
696 	if_rxr_init(&sc->sc_rx_ring, 4, GEM_NRXDESC - 4);
697 	gem_fill_rx_ring(sc);
698 
699 	return (0);
700 }
701 
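/*
 * Translate a descriptor ring size in entries into the encoding that
 * the TX/RX configuration registers expect.
 */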
702 int
703 gem_ringsize(int sz)
704 {
705 	switch (sz) {
706 	case 32:
707 		return GEM_RING_SZ_32;
708 	case 64:
709 		return GEM_RING_SZ_64;
710 	case 128:
711 		return GEM_RING_SZ_128;
712 	case 256:
713 		return GEM_RING_SZ_256;
714 	case 512:
715 		return GEM_RING_SZ_512;
716 	case 1024:
717 		return GEM_RING_SZ_1024;
718 	case 2048:
719 		return GEM_RING_SZ_2048;
720 	case 4096:
721 		return GEM_RING_SZ_4096;
722 	case 8192:
723 		return GEM_RING_SZ_8192;
724 	default:
725 		printf("gem: invalid Receive Descriptor ring size %d\n", sz);
726 		return GEM_RING_SZ_32;
727 	}
728 }
729 
730 /*
731  * Initialization of interface; reset the chip and set up
732  * the transmit/receive descriptor rings.
733  */
734 int
735 gem_init(struct ifnet *ifp)
736 {
737 
738 	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
739 	bus_space_tag_t t = sc->sc_bustag;
740 	bus_space_handle_t h = sc->sc_h1;
741 	int s;
742 	u_int32_t v;
743 
744 	s = splnet();
745 
746 	DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
747 	/*
748 	 * Initialization sequence. The numbered steps below correspond
749 	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
750 	 * Channel Engine manual (part of the PCIO manual).
751 	 * See also the STP2002-STQ document from Sun Microsystems.
752 	 */
753 
754 	/* step 1 & 2. Reset the Ethernet Channel */
755 	gem_stop(ifp, 0);
756 	gem_reset(sc);
757 	DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));
758 
759 	/* Re-initialize the MIF */
760 	gem_mifinit(sc);
761 
762 	/* Call MI reset function if any */
763 	if (sc->sc_hwreset)
764 		(*sc->sc_hwreset)(sc);
765 
766 	/* step 3. Setup data structures in host memory */
767 	gem_meminit(sc);
768 
769 	/* step 4. TX MAC registers & counters */
770 	gem_init_regs(sc);
771 
772 	/* step 5. RX MAC registers & counters */
773 	gem_iff(sc);
774 
775 	/* step 6 & 7. Program Descriptor Ring Base Addresses */
776 	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI,
777 	    (((uint64_t)GEM_CDTXADDR(sc,0)) >> 32));
778 	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
779 
780 	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
781 	    (((uint64_t)GEM_CDRXADDR(sc,0)) >> 32));
782 	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
783 
784 	/* step 8. Global Configuration & Interrupt Mask */
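	/*
	 * A set bit in GEM_INTMASK masks (disables) that source, so the
	 * complement below enables exactly the interrupts listed.
	 */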
785 	bus_space_write_4(t, h, GEM_INTMASK,
786 		      ~(GEM_INTR_TX_INTME|
787 			GEM_INTR_TX_EMPTY|
788 			GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
789 			GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
790 			GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
791 			GEM_INTR_BERR));
792 	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
793 	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
794 	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
795 	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */
796 
797 	/* step 9. ETX Configuration: use mostly default values */
798 
799 	/* Enable DMA */
800 	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
801 	v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x04ff) << 10) &
802 	    GEM_TX_CONFIG_TXFIFO_TH;
803 	bus_space_write_4(t, h, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
804 	bus_space_write_4(t, h, GEM_TX_KICK, 0);
805 
806 	/* step 10. ERX Configuration */
807 
808 	/* Encode Receive Descriptor ring size (one of nine sizes) */
809 	v = gem_ringsize(GEM_NRXDESC /*XXX*/);
810 	/* Enable DMA */
811 	bus_space_write_4(t, h, GEM_RX_CONFIG,
812 		v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
813 		(2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
814 		(0<<GEM_RX_CONFIG_CXM_START_SHFT));
815 	/*
816 	 * The following sets an OFF Threshold of about 3/4 full and an ON
817 	 * Threshold of 1/4 full, expressed in the register's 64-byte units.
818 	 */
819 	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
820 	    (3 * sc->sc_rxfifosize / 256) |
821 	    ((sc->sc_rxfifosize / 256) << 12));
822 	bus_space_write_4(t, h, GEM_RX_BLANKING, (6 << 12) | 6);
823 
824 	/* step 11. Configure Media */
825 	mii_mediachg(&sc->sc_mii);
826 
827 	/* step 12. RX_MAC Configuration Register */
828 	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
829 	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
830 	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
831 
832 	/* step 14. Issue Transmit Pending command */
833 
834 	/* Call MI initialization function if any */
835 	if (sc->sc_hwinit)
836 		(*sc->sc_hwinit)(sc);
837 
838 	/* step 15.  Give the receiver a swift kick */
839 	bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod);
840 
841 	/* Start the one second timer. */
842 	timeout_add_sec(&sc->sc_tick_ch, 1);
843 
844 	ifp->if_flags |= IFF_RUNNING;
845 	ifq_clr_oactive(&ifp->if_snd);
846 
847 	splx(s);
848 
849 	return (0);
850 }
851 
852 void
853 gem_init_regs(struct gem_softc *sc)
854 {
855 	bus_space_tag_t t = sc->sc_bustag;
856 	bus_space_handle_t h = sc->sc_h1;
857 	u_int32_t v;
858 
859 	/* These regs are not cleared on reset */
860 	sc->sc_inited = 0;
861 	if (!sc->sc_inited) {
862 		/* Load recommended values */
863 		bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00);
864 		bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08);
865 		bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04);
866 
867 		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
868 		/* Max frame and max burst size */
869 		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
870 		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));
871 
872 		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07);
873 		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04);
874 		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
875 		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
876 		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
877 		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff);
878 
879 		/* Secondary MAC addr set to 0:0:0:0:0:0 */
880 		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
881 		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
882 		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);
883 
884 		/* MAC control addr set to 01:80:c2:00:00:01 (802.3x pause) */
885 		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
886 		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
887 		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);
888 
889 		/* MAC filter addr set to 0:0:0:0:0:0 */
890 		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
891 		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
892 		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);
893 
894 		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
895 		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);
896 
897 		sc->sc_inited = 1;
898 	}
899 
900 	/* Counters need to be zeroed */
901 	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
902 	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
903 	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
904 	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
905 	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
906 	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
907 	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
908 	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
909 	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
910 	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
911 	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);
912 
913 	/* Set XOFF PAUSE time */
914 	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1bf0);
915 
916 	/*
917 	 * Set the internal arbitration to "infinite" bursts of the
918 	 * maximum length of 31 * 64 bytes so DMA transfers aren't
919 	 * split up into cache-line-sized chunks.  This greatly improves
920 	 * RX performance in particular.
921 	 * Enable silicon bug workarounds for the Apple variants.
922 	 */
923 	v = GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT;
924 	if (sc->sc_pci)
925 		v |= GEM_CONFIG_BURST_INF;
926 	else
927 		v |= GEM_CONFIG_BURST_64;
928 	if (sc->sc_variant != GEM_SUN_GEM && sc->sc_variant != GEM_SUN_ERI)
929 		v |= GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX;
930 	bus_space_write_4(t, h, GEM_CONFIG, v);
931 
932 	/*
933 	 * Set the station address.
934 	 */
935 	bus_space_write_4(t, h, GEM_MAC_ADDR0,
936 		(sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
937 	bus_space_write_4(t, h, GEM_MAC_ADDR1,
938 		(sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
939 	bus_space_write_4(t, h, GEM_MAC_ADDR2,
940 		(sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);
941 }
942 
943 /*
944  * Receive interrupt.
945  */
946 int
947 gem_rint(struct gem_softc *sc)
948 {
949 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
950 	bus_space_tag_t t = sc->sc_bustag;
951 	bus_space_handle_t h = sc->sc_h1;
952 	struct gem_rxsoft *rxs;
953 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
954 	struct mbuf *m;
955 	u_int64_t rxstat;
956 	int i, len;
957 
958 	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
959 		return (0);
960 
961 	for (i = sc->sc_rx_cons; if_rxr_inuse(&sc->sc_rx_ring) > 0;
962 	    i = GEM_NEXTRX(i)) {
963 		rxs = &sc->sc_rxsoft[i];
964 
965 		GEM_CDRXSYNC(sc, i,
966 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
967 
968 		rxstat = GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_flags);
969 
970 		if (rxstat & GEM_RD_OWN) {
971 			/* We have processed all of the receive buffers. */
972 			break;
973 		}
974 
975 		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
976 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
977 		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
978 
979 		m = rxs->rxs_mbuf;
980 		rxs->rxs_mbuf = NULL;
981 
982 		if_rxr_put(&sc->sc_rx_ring, 1);
983 
984 		if (rxstat & GEM_RD_BAD_CRC) {
985 			ifp->if_ierrors++;
986 #ifdef GEM_DEBUG
987 			printf("%s: receive error: CRC error\n",
988 				sc->sc_dev.dv_xname);
989 #endif
990 			m_freem(m);
991 			continue;
992 		}
993 
994 #ifdef GEM_DEBUG
995 		if (ifp->if_flags & IFF_DEBUG) {
996 			printf("    rxsoft %p descriptor %d: ", rxs, i);
997 			printf("gd_flags: 0x%016llx\t", (long long)
998 				GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_flags));
999 			printf("gd_addr: 0x%016llx\n", (long long)
1000 				GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_addr));
1001 		}
1002 #endif
1003 
1004 		/* No errors; receive the packet. */
1005 		len = GEM_RD_BUFLEN(rxstat);
1006 
1007 		m->m_data += 2; /* the chip wrote the frame 2 bytes in (FBOFF) */
1008 		m->m_pkthdr.len = m->m_len = len;
1009 
1010 		ml_enqueue(&ml, m);
1011 	}
1012 
1013 	/* Update the receive pointer. */
1014 	sc->sc_rx_cons = i;
1015 	gem_fill_rx_ring(sc);
1016 	bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod);
1017 
1018 	DPRINTF(sc, ("gem_rint: done sc->sc_rx_cons %d, complete %d\n",
1019 		sc->sc_rx_cons, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
1020 
1021 	if_input(ifp, &ml);
1022 
1023 	return (1);
1024 }
1025 
1026 void
1027 gem_fill_rx_ring(struct gem_softc *sc)
1028 {
1029 	u_int slots;
1030 
1031 	for (slots = if_rxr_get(&sc->sc_rx_ring, GEM_NRXDESC - 4);
1032 	    slots > 0; slots--) {
1033 		if (gem_add_rxbuf(sc, sc->sc_rx_prod))
1034 			break;
1035 	}
1036 	if_rxr_put(&sc->sc_rx_ring, slots);
1037 }
1038 
1039 /*
1040  * Add a receive buffer to the indicated descriptor.
1041  */
1042 int
1043 gem_add_rxbuf(struct gem_softc *sc, int idx)
1044 {
1045 	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1046 	struct mbuf *m;
1047 	int error;
1048 
1049 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
1050 	if (!m)
1051 		return (ENOBUFS);
1052 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1053 
1054 #ifdef GEM_DEBUG
1055 	/* Zero the buffer so DMA problems are easier to spot. */
1056 	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1057 #endif
1058 
1059 	rxs->rxs_mbuf = m;
1060 
1061 	error = bus_dmamap_load_mbuf(sc->sc_dmatag, rxs->rxs_dmamap, m,
1062 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1063 	if (error) {
1064 		printf("%s: can't load rx DMA map %d, error = %d\n",
1065 		    sc->sc_dev.dv_xname, idx, error);
1066 		panic("gem_add_rxbuf");	/* XXX */
1067 	}
1068 
1069 	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1070 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1071 
1072 	GEM_INIT_RXDESC(sc, idx);
1073 
1074 	sc->sc_rx_prod = GEM_NEXTRX(sc->sc_rx_prod);
1075 
1076 	return (0);
1077 }
1078 
1079 int
1080 gem_eint(struct gem_softc *sc, u_int status)
1081 {
1082 	if ((status & GEM_INTR_MIF) != 0) {
1083 #ifdef GEM_DEBUG
1084 		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
1085 #endif
1086 		return (1);
1087 	}
1088 
1089 	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS);
1090 	return (1);
1091 }
1092 
1093 int
1094 gem_pint(struct gem_softc *sc)
1095 {
1096 	bus_space_tag_t t = sc->sc_bustag;
1097 	bus_space_handle_t seb = sc->sc_h1;
1098 	u_int32_t status;
1099 
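	/*
	 * The PCS interrupt status register appears to be clear-on-read;
	 * read it twice and merge the results so an event that latches
	 * between the two reads is not lost.
	 */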
1100 	status = bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
1101 	status |= bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
1102 #ifdef GEM_DEBUG
1103 	if (status)
1104 		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
1105 #endif
1106 	return (1);
1107 }
1108 
1109 int
1110 gem_intr(void *v)
1111 {
1112 	struct gem_softc *sc = (struct gem_softc *)v;
1113 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1114 	bus_space_tag_t t = sc->sc_bustag;
1115 	bus_space_handle_t seb = sc->sc_h1;
1116 	u_int32_t status;
1117 	int r = 0;
1118 
1119 	status = bus_space_read_4(t, seb, GEM_STATUS);
1120 	DPRINTF(sc, ("%s: gem_intr: cplt %x status %b\n",
1121 		sc->sc_dev.dv_xname, (status>>19), status, GEM_INTR_BITS));
1122 
1123 	if (status == 0xffffffff)
1124 		return (0);
1125 
1126 	if ((status & GEM_INTR_PCS) != 0)
1127 		r |= gem_pint(sc);
1128 
1129 	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
1130 		r |= gem_eint(sc, status);
1131 
1132 	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
1133 		r |= gem_tint(sc, status);
1134 
1135 	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
1136 		r |= gem_rint(sc);
1137 
1138 	/* We should eventually do more than just print out error stats. */
1139 	if (status & GEM_INTR_TX_MAC) {
1140 		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
1141 #ifdef GEM_DEBUG
1142 		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
1143 			printf("%s: MAC tx fault, status %x\n",
1144 			    sc->sc_dev.dv_xname, txstat);
1145 #endif
1146 		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) {
1147 			KERNEL_LOCK();
1148 			gem_init(ifp);
1149 			KERNEL_UNLOCK();
1150 		}
1151 	}
1152 	if (status & GEM_INTR_RX_MAC) {
1153 		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
1154 #ifdef GEM_DEBUG
1155 		if (rxstat & ~GEM_MAC_RX_DONE)
1156 			printf("%s: MAC rx fault, status %x\n",
1157 			    sc->sc_dev.dv_xname, rxstat);
1158 #endif
1159 		if (rxstat & GEM_MAC_RX_OVERFLOW) {
1160 			ifp->if_ierrors++;
1161 
1162 			/*
1163 			 * Apparently a silicon bug causes ERI to hang
1164 			 * from time to time.  So if we detect an RX
1165 			 * FIFO overflow, we fire off a timer, and
1166 			 * check whether we're still making progress
1167 			 * by looking at the RX FIFO write and read
1168 			 * pointers.
1169 			 */
1170 			sc->sc_rx_fifo_wr_ptr =
1171 				bus_space_read_4(t, seb, GEM_RX_FIFO_WR_PTR);
1172 			sc->sc_rx_fifo_rd_ptr =
1173 				bus_space_read_4(t, seb, GEM_RX_FIFO_RD_PTR);
1174 			timeout_add_msec(&sc->sc_rx_watchdog, 400);
1175 		}
1176 #ifdef GEM_DEBUG
1177 		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
1178 			printf("%s: MAC rx fault, status %x\n",
1179 			    sc->sc_dev.dv_xname, rxstat);
1180 #endif
1181 	}
1182 	return (r);
1183 }
1184 
1185 void
1186 gem_rx_watchdog(void *arg)
1187 {
1188 	struct gem_softc *sc = arg;
1189 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1190 	bus_space_tag_t t = sc->sc_bustag;
1191 	bus_space_handle_t h = sc->sc_h1;
1192 	u_int32_t rx_fifo_wr_ptr;
1193 	u_int32_t rx_fifo_rd_ptr;
1194 	u_int32_t state;
1195 
1196 	if ((ifp->if_flags & IFF_RUNNING) == 0)
1197 		return;
1198 
1199 	rx_fifo_wr_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_WR_PTR);
1200 	rx_fifo_rd_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_RD_PTR);
1201 	state = bus_space_read_4(t, h, GEM_MAC_MAC_STATE);
1202 	if ((state & GEM_MAC_STATE_OVERFLOW) == GEM_MAC_STATE_OVERFLOW &&
1203 	    ((rx_fifo_wr_ptr == rx_fifo_rd_ptr) ||
1204 	     ((sc->sc_rx_fifo_wr_ptr == rx_fifo_wr_ptr) &&
1205 	      (sc->sc_rx_fifo_rd_ptr == rx_fifo_rd_ptr)))) {
1206 		/*
1207 		 * The RX state machine is still in overflow state and
1208 		 * the RX FIFO write and read pointers seem to be
1209 		 * stuck.  Whack the chip over the head to get things
1210 		 * going again.
1211 		 */
1212 		gem_init(ifp);
1213 	}
1214 }
1215 
1216 void
1217 gem_watchdog(struct ifnet *ifp)
1218 {
1219 	struct gem_softc *sc = ifp->if_softc;
1220 
1221 	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
1222 		"GEM_MAC_RX_CONFIG %x\n",
1223 		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
1224 		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
1225 		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));
1226 
1227 	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1228 	++ifp->if_oerrors;
1229 
1230 	/* Try to get more packets going. */
1231 	gem_init(ifp);
1232 }
1233 
1234 /*
1235  * Initialize the MII Management Interface
1236  */
1237 void
1238 gem_mifinit(struct gem_softc *sc)
1239 {
1240 	bus_space_tag_t t = sc->sc_bustag;
1241 	bus_space_handle_t mif = sc->sc_h1;
1242 
1243 	/* Configure the MIF in frame mode */
1244 	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
1245 	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
1246 	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
1247 }
1248 
1249 /*
1250  * MII interface
1251  *
1252  * The GEM MII interface supports at least three different operating modes:
1253  *
1254  * Bitbang mode is implemented using data, clock and output enable registers.
1255  *
1256  * Frame mode is implemented by loading a complete frame into the frame
1257  * register and polling the valid bit for completion.
1258  *
1259  * Polling mode uses the frame register but completion is indicated by
1260  * an interrupt.
1261  *
1262  */
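/*
 * This driver runs the MIF in frame mode: gem_mifinit() clears the
 * bitbang enable bit, and the accessors below poll the frame register's
 * TA (turnaround) bit for completion.
 */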
1263 int
1264 gem_mii_readreg(struct device *self, int phy, int reg)
1265 {
1266 	struct gem_softc *sc = (void *)self;
1267 	bus_space_tag_t t = sc->sc_bustag;
1268 	bus_space_handle_t mif = sc->sc_h1;
1269 	int n;
1270 	u_int32_t v;
1271 
1272 #ifdef GEM_DEBUG
1273 	if (sc->sc_debug)
1274 		printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
1275 #endif
1276 
1277 	/* Construct the frame command */
1278 	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
1279 	    GEM_MIF_FRAME_READ;
1280 
1281 	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
1282 	for (n = 0; n < 100; n++) {
1283 		DELAY(1);
1284 		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
1285 		if (v & GEM_MIF_FRAME_TA0)
1286 			return (v & GEM_MIF_FRAME_DATA);
1287 	}
1288 
1289 	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
1290 	return (0);
1291 }
1292 
1293 void
1294 gem_mii_writereg(struct device *self, int phy, int reg, int val)
1295 {
1296 	struct gem_softc *sc = (void *)self;
1297 	bus_space_tag_t t = sc->sc_bustag;
1298 	bus_space_handle_t mif = sc->sc_h1;
1299 	int n;
1300 	u_int32_t v;
1301 
1302 #ifdef GEM_DEBUG
1303 	if (sc->sc_debug)
1304 		printf("gem_mii_writereg: phy %d reg %d val %x\n",
1305 			phy, reg, val);
1306 #endif
1307 
1308 	/* Construct the frame command */
1309 	v = GEM_MIF_FRAME_WRITE			|
1310 	    (phy << GEM_MIF_PHY_SHIFT)		|
1311 	    (reg << GEM_MIF_REG_SHIFT)		|
1312 	    (val & GEM_MIF_FRAME_DATA);
1313 
1314 	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
1315 	for (n = 0; n < 100; n++) {
1316 		DELAY(1);
1317 		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
1318 		if (v & GEM_MIF_FRAME_TA0)
1319 			return;
1320 	}
1321 
1322 	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
1323 }
1324 
1325 void
1326 gem_mii_statchg(struct device *dev)
1327 {
1328 	struct gem_softc *sc = (void *)dev;
1329 #ifdef GEM_DEBUG
1330 	uint64_t instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
1331 #endif
1332 	bus_space_tag_t t = sc->sc_bustag;
1333 	bus_space_handle_t mac = sc->sc_h1;
1334 	u_int32_t v;
1335 
1336 #ifdef GEM_DEBUG
1337 	if (sc->sc_debug)
1338 		printf("gem_mii_statchg: status change: phy = %lld\n", instance);
1339 #endif
1340 
1341 	/* Set tx full duplex options */
1342 	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
1343 	delay(10000); /* clear the reg, then delay, before reprogramming it */
1344 	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
1345 		GEM_MAC_TX_ENABLE;
1346 	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
1347 		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
1348 	}
1349 	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);
1350 
1351 	/* XIF Configuration */
1352 	v = GEM_MAC_XIF_TX_MII_ENA;
1353 	v |= GEM_MAC_XIF_LINK_LED;
1354 
1355 	/* External MII needs echo disable if half duplex. */
1356 	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
1357 		/* turn on full duplex LED */
1358 		v |= GEM_MAC_XIF_FDPLX_LED;
1359 	else
1360 		/* half duplex -- disable echo */
1361 		v |= GEM_MAC_XIF_ECHO_DISABL;
1362 
1363 	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
1364 	case IFM_1000_T:  /* Gigabit using GMII interface */
1365 	case IFM_1000_SX:
1366 		v |= GEM_MAC_XIF_GMII_MODE;
1367 		break;
1368 	default:
1369 		v &= ~GEM_MAC_XIF_GMII_MODE;
1370 	}
1371 	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
1372 
1373 	/*
1374 	 * 802.3x flow control
1375 	 */
1376 	v = bus_space_read_4(t, mac, GEM_MAC_CONTROL_CONFIG);
1377 	v &= ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
1378 	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1379 		v |= GEM_MAC_CC_RX_PAUSE;
1380 	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1381 		v |= GEM_MAC_CC_TX_PAUSE;
1382 	bus_space_write_4(t, mac, GEM_MAC_CONTROL_CONFIG, v);
1383 }
1384 
1385 int
1386 gem_pcs_readreg(struct device *self, int phy, int reg)
1387 {
1388 	struct gem_softc *sc = (void *)self;
1389 	bus_space_tag_t t = sc->sc_bustag;
1390 	bus_space_handle_t pcs = sc->sc_h1;
1391 
1392 #ifdef GEM_DEBUG
1393 	if (sc->sc_debug)
1394 		printf("gem_pcs_readreg: phy %d reg %d\n", phy, reg);
1395 #endif
1396 
1397 	if (phy != GEM_PHYAD_EXTERNAL)
1398 		return (0);
1399 
1400 	switch (reg) {
1401 	case MII_BMCR:
1402 		reg = GEM_MII_CONTROL;
1403 		break;
1404 	case MII_BMSR:
1405 		reg = GEM_MII_STATUS;
1406 		break;
1407 	case MII_ANAR:
1408 		reg = GEM_MII_ANAR;
1409 		break;
1410 	case MII_ANLPAR:
1411 		reg = GEM_MII_ANLPAR;
1412 		break;
1413 	case MII_EXTSR:
1414 		return (EXTSR_1000XFDX|EXTSR_1000XHDX);
1415 	default:
1416 		return (0);
1417 	}
1418 
1419 	return bus_space_read_4(t, pcs, reg);
1420 }
1421 
1422 void
1423 gem_pcs_writereg(struct device *self, int phy, int reg, int val)
1424 {
1425 	struct gem_softc *sc = (void *)self;
1426 	bus_space_tag_t t = sc->sc_bustag;
1427 	bus_space_handle_t pcs = sc->sc_h1;
1428 	int reset = 0;
1429 
1430 #ifdef GEM_DEBUG
1431 	if (sc->sc_debug)
1432 		printf("gem_pcs_writereg: phy %d reg %d val %x\n",
1433 			phy, reg, val);
1434 #endif
1435 
1436 	if (phy != GEM_PHYAD_EXTERNAL)
1437 		return;
1438 
1439 	if (reg == MII_ANAR)
1440 		bus_space_write_4(t, pcs, GEM_MII_CONFIG, 0);
1441 
1442 	switch (reg) {
1443 	case MII_BMCR:
1444 		reset = (val & GEM_MII_CONTROL_RESET);
1445 		reg = GEM_MII_CONTROL;
1446 		break;
1447 	case MII_BMSR:
1448 		reg = GEM_MII_STATUS;
1449 		break;
1450 	case MII_ANAR:
1451 		reg = GEM_MII_ANAR;
1452 		break;
1453 	case MII_ANLPAR:
1454 		reg = GEM_MII_ANLPAR;
1455 		break;
1456 	default:
1457 		return;
1458 	}
1459 
1460 	bus_space_write_4(t, pcs, reg, val);
1461 
1462 	if (reset)
1463 		gem_bitwait(sc, pcs, GEM_MII_CONTROL, GEM_MII_CONTROL_RESET, 0);
1464 
1465 	if (reg == GEM_MII_ANAR || reset) {
1466 		bus_space_write_4(t, pcs, GEM_MII_SLINK_CONTROL,
1467 		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);
1468 		bus_space_write_4(t, pcs, GEM_MII_CONFIG,
1469 		    GEM_MII_CONFIG_ENABLE);
1470 	}
1471 }
1472 
1473 int
1474 gem_mediachange(struct ifnet *ifp)
1475 {
1476 	struct gem_softc *sc = ifp->if_softc;
1477 	struct mii_data *mii = &sc->sc_mii;
1478 
1479 	if (mii->mii_instance) {
1480 		struct mii_softc *miisc;
1481 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1482 			mii_phy_reset(miisc);
1483 	}
1484 
1485 	return (mii_mediachg(&sc->sc_mii));
1486 }
1487 
1488 void
1489 gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1490 {
1491 	struct gem_softc *sc = ifp->if_softc;
1492 
1493 	mii_pollstat(&sc->sc_mii);
1494 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1495 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1496 }
1497 
1498 /*
1499  * Process an ioctl request.
1500  */
1501 int
1502 gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1503 {
1504 	struct gem_softc *sc = ifp->if_softc;
1505 	struct ifreq *ifr = (struct ifreq *)data;
1506 	int s, error = 0;
1507 
1508 	s = splnet();
1509 
1510 	switch (cmd) {
1511 	case SIOCSIFADDR:
1512 		ifp->if_flags |= IFF_UP;
1513 		if ((ifp->if_flags & IFF_RUNNING) == 0)
1514 			gem_init(ifp);
1515 		break;
1516 
1517 	case SIOCSIFFLAGS:
1518 		if (ifp->if_flags & IFF_UP) {
1519 			if (ifp->if_flags & IFF_RUNNING)
1520 				error = ENETRESET;
1521 			else
1522 				gem_init(ifp);
1523 		} else {
1524 			if (ifp->if_flags & IFF_RUNNING)
1525 				gem_stop(ifp, 0);
1526 		}
1527 #ifdef GEM_DEBUG
1528 		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
1529 #endif
1530 		break;
1531 
1532 	case SIOCGIFMEDIA:
1533 	case SIOCSIFMEDIA:
1534 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1535 		break;
1536 
1537 	case SIOCGIFRXR:
1538 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
1539 		    NULL, MCLBYTES, &sc->sc_rx_ring);
1540 		break;
1541 
1542 	default:
1543 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1544 	}
1545 
1546 	if (error == ENETRESET) {
1547 		if (ifp->if_flags & IFF_RUNNING)
1548 			gem_iff(sc);
1549 		error = 0;
1550 	}
1551 
1552 	splx(s);
1553 	return (error);
1554 }
1555 
1556 void
1557 gem_iff(struct gem_softc *sc)
1558 {
1559 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1560 	struct arpcom *ac = &sc->sc_arpcom;
1561 	struct ether_multi *enm;
1562 	struct ether_multistep step;
1563 	bus_space_tag_t t = sc->sc_bustag;
1564 	bus_space_handle_t h = sc->sc_h1;
1565 	u_int32_t crc, hash[16], rxcfg;
1566 	int i;
1567 
1568 	rxcfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
1569 	rxcfg &= ~(GEM_MAC_RX_HASH_FILTER | GEM_MAC_RX_PROMISCUOUS |
1570 	    GEM_MAC_RX_PROMISC_GRP);
1571 	ifp->if_flags &= ~IFF_ALLMULTI;
1572 
1573 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1574 		ifp->if_flags |= IFF_ALLMULTI;
1575 		if (ifp->if_flags & IFF_PROMISC)
1576 			rxcfg |= GEM_MAC_RX_PROMISCUOUS;
1577 		else
1578 			rxcfg |= GEM_MAC_RX_PROMISC_GRP;
1579 	} else {
1580 		/*
1581 		 * Set up multicast address filter by passing all multicast
1582 		 * addresses through a crc generator, and then using the
1583 		 * high order 8 bits as an index into the 256 bit logical
1584 		 * address filter.  The high order 4 bits select the word,
1585 		 * while the other 4 bits select the bit within the word
1586 		 * (where bit 0 is the MSB).
1587 		 */
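		/*
		 * Worked example (hypothetical address): if the top CRC
		 * byte is 0xa5, the word index is 0xa and the bit index
		 * is 15 - 5 = 10, so bit 10 of hash[10] is set.
		 */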
1588 
1589 		rxcfg |= GEM_MAC_RX_HASH_FILTER;
1590 
1591 		/* Clear hash table */
1592 		for (i = 0; i < 16; i++)
1593 			hash[i] = 0;
1594 
1595 		ETHER_FIRST_MULTI(step, ac, enm);
1596 		while (enm != NULL) {
1597 			crc = ether_crc32_le(enm->enm_addrlo,
1598 			    ETHER_ADDR_LEN);
1599 
1600 			/* Just want the 8 most significant bits. */
1601 			crc >>= 24;
1602 
1603 			/* Set the corresponding bit in the filter. */
1604 			hash[crc >> 4] |= 1 << (15 - (crc & 15));
1605 
1606 			ETHER_NEXT_MULTI(step, enm);
1607 		}
1608 
1609 		/* Now load the hash table into the chip (if we are using it) */
1610 		for (i = 0; i < 16; i++) {
1611 			bus_space_write_4(t, h,
1612 			    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
1613 			    hash[i]);
1614 		}
1615 	}
1616 
1617 	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, rxcfg);
1618 }
1619 
1620 /*
1621  * Transmit interrupt.
1622  */
1623 int
1624 gem_tint(struct gem_softc *sc, u_int32_t status)
1625 {
1626 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1627 	struct gem_sxd *sd;
1628 	u_int32_t cons, hwcons;
1629 	u_int32_t used, free = 0;
1630 
1631 	hwcons = status >> 19;
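	/*
	 * The hardware reports the TX completion index in the upper bits
	 * of the status word; gem_intr() prints the same field as "cplt".
	 */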
1632 	cons = sc->sc_tx_cons;
1633 	while (cons != hwcons) {
1634 		sd = &sc->sc_txd[cons];
1635 		if (sd->sd_mbuf != NULL) {
1636 			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
1637 			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1638 			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
1639 			m_freem(sd->sd_mbuf);
1640 			sd->sd_mbuf = NULL;
1641 			ifp->if_opackets++;
1642 		}
1643 		free++;
1644 		if (++cons == GEM_NTXDESC)
1645 			cons = 0;
1646 	}
1647 
1648 	sc->sc_tx_cons = cons;
1649 	used = atomic_sub_int_nv(&sc->sc_tx_cnt, free);
1650 
1651 	if (used == 0)
1652 		ifp->if_timer = 0;
1653 
1654 	if (ifq_is_oactive(&ifp->if_snd) && (used + GEM_NTXSEGS <
1655 	    GEM_NTXDESC - 2)) {
1656 		ifq_clr_oactive(&ifp->if_snd);
1657 
1658 		KERNEL_LOCK();
1659 		gem_start(ifp);
1660 		KERNEL_UNLOCK();
1661 	}
1662 
1663 	return (1);
1664 }
1665 
1666 int
1667 gem_load_mbuf(struct gem_softc *sc, struct gem_sxd *sd, struct mbuf *m)
1668 {
1669 	int error;
1670 
1671 	error = bus_dmamap_load_mbuf(sc->sc_dmatag, sd->sd_map, m,
1672 	    BUS_DMA_NOWAIT);
1673 	switch (error) {
1674 	case 0:
1675 		break;
1676 
1677 	case EFBIG: /* mbuf chain is too fragmented */
1678 		if (m_defrag(m, M_DONTWAIT) == 0 &&
1679 		    bus_dmamap_load_mbuf(sc->sc_dmatag, sd->sd_map, m,
1680 		    BUS_DMA_NOWAIT) == 0)
1681 			break;
1682 		/* FALLTHROUGH */
1683 	default:
1684 		return (1);
1685 	}
1686 
1687 	return (0);
1688 }
1689 
1690 void
1691 gem_start(struct ifnet *ifp)
1692 {
1693 	struct gem_softc *sc = ifp->if_softc;
1694 	struct gem_sxd *sd;
1695 	struct mbuf *m;
1696 	u_int64_t flags;
1697 	bus_dmamap_t map;
1698 	u_int32_t prod, first, last, i;
1699 	unsigned int used, new;
1700 
1701 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1702 		return;
1703 
1704 	prod = sc->sc_tx_prod;
1705 	used = sc->sc_tx_cnt;
1706 	new = 0;
1707 
1708 	for (;;) {
1709 		if (used + new + GEM_NTXSEGS > (GEM_NTXDESC - 2)) {
1710 			ifq_set_oactive(&ifp->if_snd);
1711 			break;
1712 		}
1713 
1714 		IFQ_DEQUEUE(&ifp->if_snd, m);
1715 		if (m == NULL)
1716 			break;
1717 
1718 		first = prod;
1719 		sd = &sc->sc_txd[prod];
1720 		map = sd->sd_map;
1721 
1722 		if (gem_load_mbuf(sc, sd, m)) {
1723 			m_freem(m);
1724 			ifp->if_oerrors++;
1725 			continue;
1726 		}
1727 
1728 #if NBPFILTER > 0
1729 		if (ifp->if_bpf)
1730 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1731 #endif
1732 
1733 		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
1734 		    BUS_DMASYNC_PREWRITE);
1735 
1736 		for (i = 0; i < map->dm_nsegs; i++) {
1737 			GEM_DMA_WRITE(sc, &sc->sc_txdescs[prod].gd_addr,
1738 			    map->dm_segs[i].ds_addr);
1739 			flags = map->dm_segs[i].ds_len & GEM_TD_BUFSIZE;
1740 			if (i == 0)
1741 				flags |= GEM_TD_START_OF_PACKET;
1742 			if (i == (map->dm_nsegs - 1))
1743 				flags |= GEM_TD_END_OF_PACKET;
1744 			GEM_DMA_WRITE(sc, &sc->sc_txdescs[prod].gd_flags,
1745 			    flags);
1746 			bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
1747 			    GEM_CDTXOFF(prod), sizeof(struct gem_desc),
1748 			    BUS_DMASYNC_PREWRITE);
1749 
1750 			last = prod;
1751 			if (++prod == GEM_NTXDESC)
1752 				prod = 0;
1753 		}
1754 
1755 		new += map->dm_nsegs;
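		/*
		 * The mbuf was loaded into the map belonging to the first
		 * descriptor; swap maps so the loaded map rides on the
		 * last descriptor, where the mbuf is stored and where
		 * gem_tint() will sync, unload and free it.
		 */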
1756 		sc->sc_txd[last].sd_mbuf = m;
1757 		sc->sc_txd[first].sd_map = sc->sc_txd[last].sd_map;
1758 		sc->sc_txd[last].sd_map = map;
1759 	}
1760 
1761 	if (new == 0)
1762 		return;
1763 
1764 	atomic_add_int(&sc->sc_tx_cnt, new);
1765 
1766 	/* Commit. */
1767 	sc->sc_tx_prod = prod;
1768 
1769 	/* Transmit. */
1770 	bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, prod);
1771 
1772 	/* Set timeout in case hardware has problems transmitting. */
1773 	ifp->if_timer = 5;
1774 }
1775