/*	$OpenBSD: gem.c,v 1.13 2001/12/13 03:51:10 drahn Exp $	*/
/*	$NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $ */

/*
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <machine/endian.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NVLAN > 0
#include <net/if_vlan_var.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/gemreg.h>
#include <dev/ic/gemvar.h>

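/*
 * TRIES bounds the register-polling loops below; each iteration is
 * followed by delay(100), so every wait gives the hardware roughly one
 * second (10000 * 100us) to respond before we give up.
 */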
#define TRIES	10000

struct cfdriver gem_cd = {
	NULL, "gem", DV_IFNET
};

void		gem_start __P((struct ifnet *));
void		gem_stop __P((struct ifnet *, int));
int		gem_ioctl __P((struct ifnet *, u_long, caddr_t));
void		gem_tick __P((void *));
void		gem_watchdog __P((struct ifnet *));
void		gem_shutdown __P((void *));
int		gem_init __P((struct ifnet *));
void		gem_init_regs(struct gem_softc *sc);
static int	gem_ringsize(int sz);
int		gem_meminit __P((struct gem_softc *));
void		gem_mifinit __P((struct gem_softc *));
void		gem_reset __P((struct gem_softc *));
int		gem_reset_rx(struct gem_softc *sc);
int		gem_reset_tx(struct gem_softc *sc);
int		gem_disable_rx(struct gem_softc *sc);
int		gem_disable_tx(struct gem_softc *sc);
void		gem_rxdrain(struct gem_softc *sc);
int		gem_add_rxbuf(struct gem_softc *sc, int idx);
void		gem_setladrf __P((struct gem_softc *));
int		gem_encap __P((struct gem_softc *, struct mbuf *, u_int32_t *));

/* MII methods & callbacks */
static int	gem_mii_readreg __P((struct device *, int, int));
static void	gem_mii_writereg __P((struct device *, int, int, int));
static void	gem_mii_statchg __P((struct device *));

int		gem_mediachange __P((struct ifnet *));
void		gem_mediastatus __P((struct ifnet *, struct ifmediareq *));

struct mbuf	*gem_get __P((struct gem_softc *, int, int));
int		gem_put __P((struct gem_softc *, int, struct mbuf *));
void		gem_read __P((struct gem_softc *, int, int));
int		gem_eint __P((struct gem_softc *, u_int));
int		gem_rint __P((struct gem_softc *));
int		gem_tint __P((struct gem_softc *, u_int32_t));
void		gem_power __P((int, void *));

static int	ether_cmp __P((u_char *, u_char *));

#ifdef GEM_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif
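
/*
 * DPRINTF() fires only when IFF_DEBUG is set on the interface,
 * e.g. via "ifconfig gem0 debug".
 */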


/*
 * gem_config:
 *
 *	Attach a Gem interface to the system.
 */
void
gem_config(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error;

	bcopy(sc->sc_enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	/* Initialize ifnet structure. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = gem_mii_readreg;
	mii->mii_writereg = gem_mii_writereg;
	mii->mii_statchg = gem_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus);

	gem_mifinit(sc);

	mii_attach(&sc->sc_dev, mii, 0xffffffff,
			MII_PHY_ANY, MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				       " at phy %d, instance %d\n",
				       sc->sc_dev.dv_xname,
				       child->mii_dev.dv_xname,
				       child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * Now select and activate the PHY we will use.
		 *
		 * The order of preference is External (MDI1),
		 * Internal (MDI0), Serial Link (no MII).
		 */
		if (sc->sc_phys[1]) {
#ifdef DEBUG
			printf("using external phy\n");
#endif
			sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
		} else {
#ifdef DEBUG
			printf("using internal phy\n");
#endif
			sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
		}
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
			sc->sc_mif_config);

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_sh = shutdownhook_establish(gem_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("gem_config: can't establish shutdownhook");

	timeout_set(&sc->sc_tick_ch, gem_tick, sc);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct gem_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
	return;
}


void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	int s;

	s = splimp();

	/* unload collisions counters */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/* clear the hardware counters */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}

void
gem_reset(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;
	int s;

	s = splimp();
	DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
	for (i=TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_RESET) &
			(GEM_RESET_RX|GEM_RESET_TX)) == 0)
			break;
	if ((bus_space_read_4(t, h, GEM_RESET) &
		(GEM_RESET_RX|GEM_RESET_TX)) != 0) {
		printf("%s: cannot reset device\n",
			sc->sc_dev.dv_xname);
	}
	splx(s);
}


/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Reset the whole thing.
 */
void
gem_stop(struct ifnet *ifp, int disable)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_sxd *sd;
	u_int32_t i;

	DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));

	timeout_del(&sc->sc_tick_ch);
	mii_down(&sc->sc_mii);

	/* XXX - Should we reset these instead? */
	gem_disable_rx(sc);
	gem_disable_tx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sd = &sc->sc_txd[i];
		if (sd->sd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			bus_dmamap_destroy(sc->sc_dmatag, sd->sd_map);
			sd->sd_map = NULL;
		}
		if (sd->sd_mbuf != NULL) {
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
	}
	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;

	if (disable) {
		gem_rxdrain(sc);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}


/*
 * Reset the receiver
 */
int
gem_reset_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;


	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	for (i = TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_RX_CONFIG) & 1) == 0)
			break;
	if ((bus_space_read_4(t, h, GEM_RX_CONFIG) & 1) != 0)
		printf("%s: cannot disable rx dma\n",
			sc->sc_dev.dv_xname);

	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	for (i = TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_RX) == 0)
			break;
	if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_RX) != 0) {
		printf("%s: cannot reset receiver\n",
			sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}


/*
 * Reset the transmitter
 */
int
gem_reset_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	for (i = TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_TX_CONFIG) & 1) == 0)
			break;
	if ((bus_space_read_4(t, h, GEM_TX_CONFIG) & 1) != 0)
		printf("%s: cannot disable tx dma\n",
			sc->sc_dev.dv_xname);

	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	for (i = TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
			break;
	if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) != 0) {
		printf("%s: cannot reset transmitter\n",
			sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
int
gem_disable_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	for (i = TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_MAC_RX_CONFIG) &
			GEM_MAC_RX_ENABLE) == 0)
			return (0);
	return (1);
}

/*
 * disable transmitter.
 */
int
gem_disable_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	for (i = TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_MAC_TX_CONFIG) &
			GEM_MAC_TX_ENABLE) == 0)
			return (0);
	return (1);
}

/*
 * Initialize interface.
 */
int
gem_meminit(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	return (0);
}

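/*
 * Map a descriptor ring size to the encoding the chip expects; the TX
 * and RX configuration registers use the same code points, so gem_init()
 * calls this for both rings.
 */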
static int
gem_ringsize(int sz)
{
	int v;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		v = GEM_RING_SZ_32;
		printf("gem: invalid Receive Descriptor ring size\n");
		break;
	}
	return (v);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
int
gem_init(struct ifnet *ifp)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;
	u_int32_t v;

	s = splimp();

	DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(ifp, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);
	v = (GEM_MTU) | (0x2000 << 16) /* Burst size */;
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, v);

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI,
	    (((uint64_t)GEM_CDTXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
	    (((uint64_t)GEM_CDRXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));

	/* step 8. Global Configuration & Interrupt Mask */
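	/*
	 * Note the mask is written complemented: a set bit in GEM_INTMASK
	 * disables that source, so the ~ below enables exactly the
	 * interrupts listed.
	 */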
	bus_space_write_4(t, h, GEM_INTMASK,
		      ~(GEM_INTR_TX_INTME|
			GEM_INTR_TX_EMPTY|
			GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
			GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
			GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
			GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK, 0); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_TX_KICK, 0);
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
		v|GEM_TX_CONFIG_TXDMA_EN|
		((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size: four possible values */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
		v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
		(2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
		(0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 15.5 Kbytes
	 * and an ON Threshold of 4K bytes.
	 */
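	/*
	 * (The fields appear to be in 64-byte units: 0xf8 * 64 = 15872
	 * bytes off, 0x40 * 64 = 4096 bytes on.)
	 */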
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH, 0xf8 | (0x40 << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (2<<12)|6);

	/* step 11. Configure Media */
	gem_mii_statchg(&sc->sc_dev);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);


	/* step 15.  Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	splx(s);

	return (0);
}

/*
 * Compare two Ether/802 addresses, bcmp-style: returns nonzero iff the
 * addresses differ.  Inlined and unrolled for speed.  The caller in
 * gem_setladrf() relies on this polarity to detect address ranges.
 */
static __inline__ int
ether_cmp(a, b)
	u_char *a, *b;
{

	if (a[5] != b[5] || a[4] != b[4] || a[3] != b[3] ||
	    a[2] != b[2] || a[1] != b[1] || a[0] != b[0])
		return (1);
	return (0);
}


void
gem_init_regs(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t v;

	/* These regs are not cleared on reset */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {

		/* Wooo.  Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		v = (GEM_MTU) | (0x2000 << 16) /* Burst size */;
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, v);
		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
			((sc->sc_enaddr[5]<<8)|sc->sc_enaddr[4])&0x3ff);
		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);
		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0,
		(sc->sc_enaddr[4]<<8) | sc->sc_enaddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1,
		(sc->sc_enaddr[2]<<8) | sc->sc_enaddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2,
		(sc->sc_enaddr[0]<<8) | sc->sc_enaddr[1]);

}

/*
 * Receive interrupt.
 */
int
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct ether_header *eh;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	int i, len;

	/*
	 * XXXX Read the lastrx only once at the top for speed.
	 */
	DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
		sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
	for (i = sc->sc_rxptr; i != bus_space_read_4(t, h, GEM_RX_COMPLETION);
	     i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
			printf("gem_rint: completed descriptor "
				"still owned %d\n", i);
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
			printf("%s: receive error: CRC error\n",
				sc->sc_dev.dv_xname);
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet.  Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		m->m_data += 2; /* We're already off by two */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	bus_space_write_4(t, h, GEM_RX_KICK, i);

	DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
		sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));

	return (1);
}


/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}


int
gem_eint(sc, status)
	struct gem_softc *sc;
	u_int status;
{
	if ((status & GEM_INTR_MIF) != 0) {
		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
		return (1);
	}

	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS);
	return (1);
}


int
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %x status %b\n",
		sc->sc_dev.dv_xname, (status>>19), status, GEM_INTR_BITS));

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		r |= gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		r |= gem_tint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		r |= gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			printf("MAC tx fault, status %x\n", txstat);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
		if (rxstat & ~GEM_MAC_RX_DONE)
			printf("MAC rx fault, status %x\n", rxstat);
	}
	return (r);
}


void
gem_watchdog(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
		"GEM_MAC_RX_CONFIG %x\n",
		bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
		bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
		bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)));

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_init(ifp);
}

/*
 * Initialize the MII Management Interface
 */
void
gem_mifinit(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
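/*
 * The frame-mode helpers below assemble the command word from the opcode
 * (GEM_MIF_FRAME_READ or GEM_MIF_FRAME_WRITE), the PHY address and the
 * register number, write it to GEM_MIF_FRAME, and then poll the
 * turnaround bit (GEM_MIF_FRAME_TA0); once it rises, the data field
 * (GEM_MIF_FRAME_DATA, the low 16 bits) holds the result of a read.
 */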
static int
gem_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG1
	if (sc->sc_debug)
		printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT)	| (phy << GEM_MIF_PHY_SHIFT) |
		GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

static void
gem_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG1
	if (sc->sc_debug)
		printf("gem_mii_writereg: phy %d reg %d val %x\n",
			phy, reg, val);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif
	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE			|
	    (phy << GEM_MIF_PHY_SHIFT)		|
	    (reg << GEM_MIF_REG_SHIFT)		|
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

static void
gem_mii_statchg(dev)
	struct device *dev;
{
	struct gem_softc *sc = (void *)dev;
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif


	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	delay(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
		GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	/* We should really calculate all this rather than rely on defaults */
	v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;
	} else
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (mii_mediachg(&sc->sc_mii));
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

int gem_ioctldebug = 0;
/*
 * Process an ioctl request.
 */
int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splimp();

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			gem_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)LLADDR(ifp->if_sadl);
			else {
				memcpy(LLADDR(ifp->if_sadl),
				    ina->x_host.c_host, sizeof(sc->sc_enaddr));
			}
			/* Set new address. */
			gem_init(ifp);
			break;
		    }
#endif
		default:
			gem_init(ifp);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			gem_stop(ifp, 1);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    	   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			gem_init(ifp);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			/*gem_stop(sc);*/
			gem_init(ifp);
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			gem_setladrf(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}


void
gem_shutdown(arg)
	void *arg;
{
	struct gem_softc *sc = (struct gem_softc *)arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	gem_stop(ifp, 1);
}

/*
 * Set up the logical address filter.
 */
void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct arpcom *ac = &sc->sc_arpcom;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_char *cp;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int i, len;

	/* Clear hash table */
	memset(hash, 0, sizeof(hash));

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		v |= GEM_MAC_RX_PROMISCUOUS;
		v &= ~GEM_MAC_RX_HASH_FILTER;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	v &= ~GEM_MAC_RX_PROMISCUOUS;
	v |= GEM_MAC_RX_HASH_FILTER;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order four
	 * bits select the word, while the low order four bits select the bit
	 * within the word.
	 */
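	/*
	 * For example, a CRC whose top eight bits are 0xb5 sets bit 5
	 * (0xb5 & 0xf) of hash word 0xb (0xb5 >> 4).
	 */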

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (ether_cmp(enm->enm_addrlo, enm->enm_addrhi)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			for (i = 0; i < 16; i++)
				hash[i] = 0xffff;
			ifp->if_flags |= IFF_ALLMULTI;
			goto chipit;
		}

		cp = enm->enm_addrlo;
		crc = 0xffffffff;
		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
			int octet = *cp++;
			int i;

#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip */
	bus_space_write_4(t, h, GEM_MAC_HASH0, hash[0]);
	bus_space_write_4(t, h, GEM_MAC_HASH1, hash[1]);
	bus_space_write_4(t, h, GEM_MAC_HASH2, hash[2]);
	bus_space_write_4(t, h, GEM_MAC_HASH3, hash[3]);
	bus_space_write_4(t, h, GEM_MAC_HASH4, hash[4]);
	bus_space_write_4(t, h, GEM_MAC_HASH5, hash[5]);
	bus_space_write_4(t, h, GEM_MAC_HASH6, hash[6]);
	bus_space_write_4(t, h, GEM_MAC_HASH7, hash[7]);
	bus_space_write_4(t, h, GEM_MAC_HASH8, hash[8]);
	bus_space_write_4(t, h, GEM_MAC_HASH9, hash[9]);
	bus_space_write_4(t, h, GEM_MAC_HASH10, hash[10]);
	bus_space_write_4(t, h, GEM_MAC_HASH11, hash[11]);
	bus_space_write_4(t, h, GEM_MAC_HASH12, hash[12]);
	bus_space_write_4(t, h, GEM_MAC_HASH13, hash[13]);
	bus_space_write_4(t, h, GEM_MAC_HASH14, hash[14]);
	bus_space_write_4(t, h, GEM_MAC_HASH15, hash[15]);

	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}

int
gem_encap(sc, mhead, bixp)
	struct gem_softc *sc;
	struct mbuf *mhead;
	u_int32_t *bixp;
{
	u_int64_t flags;
	u_int32_t cur, frag, i;
	bus_dmamap_t map;

	cur = frag = *bixp;

	if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, GEM_NTXDESC,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &map) != 0) {
		return (ENOBUFS);
	}

	if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead,
	    BUS_DMA_NOWAIT) != 0) {
		bus_dmamap_destroy(sc->sc_dmatag, map);
		return (ENOBUFS);
	}

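	/*
	 * Leave at least two descriptors free: if the ring were allowed
	 * to fill completely, the producer index would wrap onto the
	 * consumer and a full ring could not be told from an empty one.
	 */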
	if ((sc->sc_tx_cnt + map->dm_nsegs) > (GEM_NTXDESC - 2)) {
		bus_dmamap_unload(sc->sc_dmatag, map);
		bus_dmamap_destroy(sc->sc_dmatag, map);
		return (ENOBUFS);
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		sc->sc_txdescs[frag].gd_addr =
		    GEM_DMA_WRITE(sc, map->dm_segs[i].ds_addr);
		flags = (map->dm_segs[i].ds_len & GEM_TD_BUFSIZE) |
		    (i == 0 ? GEM_TD_START_OF_PACKET : 0) |
		    ((i == (map->dm_nsegs - 1)) ? GEM_TD_END_OF_PACKET : 0);
		sc->sc_txdescs[frag].gd_flags = GEM_DMA_WRITE(sc, flags);
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
		    GEM_CDTXOFF(frag), sizeof(struct gem_desc),
		    BUS_DMASYNC_PREWRITE);
		cur = frag;
		if (++frag == GEM_NTXDESC)
			frag = 0;
	}
	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	sc->sc_tx_cnt += map->dm_nsegs;
	sc->sc_txd[cur].sd_map = map;
	sc->sc_txd[cur].sd_mbuf = mhead;

	bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK, frag);

	*bixp = frag;

	/* sync descriptors */

	return (0);
}

/*
 * Transmit interrupt.
 */
int
gem_tint(sc, status)
	struct gem_softc *sc;
	u_int32_t status;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct gem_sxd *sd;
	u_int32_t cons, hwcons;

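	/*
	 * The hardware's TX completion index lives in the upper bits of
	 * the status word (the same field the "cplt" DPRINTF in
	 * gem_intr() extracts).
	 */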
	hwcons = status >> 19;
	cons = sc->sc_tx_cons;
	while (cons != hwcons) {
		sd = &sc->sc_txd[cons];
		if (sd->sd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			bus_dmamap_destroy(sc->sc_dmatag, sd->sd_map);
			sd->sd_map = NULL;
		}
		if (sd->sd_mbuf != NULL) {
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
		sc->sc_tx_cnt--;
		if (++cons == GEM_NTXDESC)
			cons = 0;
	}
	sc->sc_tx_cons = cons;

	gem_start(ifp);

	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;

	return (1);
}

void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;
	struct mbuf *m;
	u_int32_t bix;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_tx_prod;
	while (sc->sc_txd[bix].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */
		if (gem_encap(sc, m, &bix)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		ifp->if_timer = 5;
	}

	sc->sc_tx_prod = bix;
}
1680