xref: /netbsd-src/sys/dev/ic/smc83c170.c (revision 8ac07aec990b9d2e483062509d0a9fa5b4f57cf2)
1 /*	$NetBSD: smc83c170.c,v 1.73 2008/04/08 12:07:27 cegger Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Device driver for the Standard Microsystems Corp. 83C170
42  * Ethernet PCI Integrated Controller (EPIC/100).
43  */
44 
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: smc83c170.c,v 1.73 2008/04/08 12:07:27 cegger Exp $");
47 
48 #include "bpfilter.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/callout.h>
53 #include <sys/mbuf.h>
54 #include <sys/malloc.h>
55 #include <sys/kernel.h>
56 #include <sys/socket.h>
57 #include <sys/ioctl.h>
58 #include <sys/errno.h>
59 #include <sys/device.h>
60 
61 #include <uvm/uvm_extern.h>
62 
63 #include <net/if.h>
64 #include <net/if_dl.h>
65 #include <net/if_media.h>
66 #include <net/if_ether.h>
67 
68 #if NBPFILTER > 0
69 #include <net/bpf.h>
70 #endif
71 
72 #include <sys/bus.h>
73 #include <sys/intr.h>
74 
75 #include <dev/mii/miivar.h>
76 #include <dev/mii/lxtphyreg.h>
77 
78 #include <dev/ic/smc83c170reg.h>
79 #include <dev/ic/smc83c170var.h>
80 
81 void	epic_start(struct ifnet *);
82 void	epic_watchdog(struct ifnet *);
83 int	epic_ioctl(struct ifnet *, u_long, void *);
84 int	epic_init(struct ifnet *);
85 void	epic_stop(struct ifnet *, int);
86 
87 void	epic_shutdown(void *);
88 
89 void	epic_reset(struct epic_softc *);
90 void	epic_rxdrain(struct epic_softc *);
91 int	epic_add_rxbuf(struct epic_softc *, int);
92 void	epic_read_eeprom(struct epic_softc *, int, int, uint16_t *);
93 void	epic_set_mchash(struct epic_softc *);
94 void	epic_fixup_clock_source(struct epic_softc *);
95 int	epic_mii_read(struct device *, int, int);
96 void	epic_mii_write(struct device *, int, int, int);
97 int	epic_mii_wait(struct epic_softc *, uint32_t);
98 void	epic_tick(void *);
99 
100 void	epic_statchg(struct device *);
101 int	epic_mediachange(struct ifnet *);
102 
103 #define	INTMASK	(INTSTAT_FATAL_INT | INTSTAT_TXU | \
104 	    INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)
105 
106 int	epic_copy_small = 0;
107 
108 #define	ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
109 
110 /*
111  * Attach an EPIC interface to the system.
112  */
113 void
114 epic_attach(sc)
115 	struct epic_softc *sc;
116 {
117 	bus_space_tag_t st = sc->sc_st;
118 	bus_space_handle_t sh = sc->sc_sh;
119 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
120 	int rseg, error, miiflags;
121 	u_int i;
122 	bus_dma_segment_t seg;
123 	uint8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
124 	uint16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
125 	char *nullbuf;
126 
127 	callout_init(&sc->sc_mii_callout, 0);
128 
129 	/*
130 	 * Allocate the control data structures, and create and load the
131 	 * DMA map for it.
132 	 */
133 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
134 	    sizeof(struct epic_control_data) + ETHER_PAD_LEN, PAGE_SIZE, 0,
135 	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
136 		aprint_error_dev(&sc->sc_dev,
137 		    "unable to allocate control data, error = %d\n",
138 		    error);
139 		goto fail_0;
140 	}
141 
142 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
143 	    sizeof(struct epic_control_data) + ETHER_PAD_LEN,
144 	    (void **)&sc->sc_control_data,
145 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
146 		aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n", error);
147 		goto fail_1;
148 	}
149 	nullbuf =
150 	    (char *)sc->sc_control_data + sizeof(struct epic_control_data);
151 	memset(nullbuf, 0, ETHER_PAD_LEN);
152 
153 	if ((error = bus_dmamap_create(sc->sc_dmat,
154 	    sizeof(struct epic_control_data), 1,
155 	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
156 	    &sc->sc_cddmamap)) != 0) {
157 		aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, "
158 		    "error = %d\n", error);
159 		goto fail_2;
160 	}
161 
162 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
163 	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
164 	    BUS_DMA_NOWAIT)) != 0) {
165 		aprint_error_dev(&sc->sc_dev,
166 		    "unable to load control data DMA map, error = %d\n",
167 		    error);
168 		goto fail_3;
169 	}
170 
171 	/*
172 	 * Create the transmit buffer DMA maps.
173 	 */
174 	for (i = 0; i < EPIC_NTXDESC; i++) {
175 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
176 		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
177 		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
178 			aprint_error_dev(&sc->sc_dev, "unable to create tx DMA map %d, "
179 			    "error = %d\n", i, error);
180 			goto fail_4;
181 		}
182 	}
183 
184 	/*
185 	 * Create the receive buffer DMA maps.
186 	 */
187 	for (i = 0; i < EPIC_NRXDESC; i++) {
188 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
189 		    MCLBYTES, 0, BUS_DMA_NOWAIT,
190 		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
191 			aprint_error_dev(&sc->sc_dev, "unable to create rx DMA map %d, "
192 			    "error = %d\n", i, error);
193 			goto fail_5;
194 		}
195 		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
196 	}
197 
198 	/*
199 	 * create and map the pad buffer
200 	 */
201 	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
202 	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) {
203 		aprint_error_dev(&sc->sc_dev, "unable to create pad buffer DMA map, "
204 		    "error = %d\n", error);
205 		goto fail_5;
206 	}
207 
208 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
209 	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
210 		aprint_error_dev(&sc->sc_dev, "unable to load pad buffer DMA map, "
211 		    "error = %d\n", error);
212 		goto fail_6;
213 	}
214 	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
215 	    BUS_DMASYNC_PREWRITE);
216 
217 	/*
218 	 * Bring the chip out of low-power mode and reset it to a known state.
219 	 */
220 	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
221 	epic_reset(sc);
222 
223 	/*
224 	 * Read the Ethernet address from the EEPROM.
225 	 */
226 	epic_read_eeprom(sc, 0, __arraycount(myea), myea);
227 	for (i = 0; i < __arraycount(myea); i++) {
228 		enaddr[i * 2]     = myea[i] & 0xff;
229 		enaddr[i * 2 + 1] = myea[i] >> 8;
230 	}
231 
232 	/*
233 	 * ...and the device name.
234 	 */
235 	epic_read_eeprom(sc, 0x2c, __arraycount(mydevname), mydevname);
236 	for (i = 0; i < __arraycount(mydevname); i++) {
237 		devname[i * 2]     = mydevname[i] & 0xff;
238 		devname[i * 2 + 1] = mydevname[i] >> 8;
239 	}
240 
241 	devname[sizeof(mydevname)] = '\0';
242 	for (i = sizeof(mydevname) ; i > 0; i--) {
243 		if (devname[i - 1] == ' ')
244 			devname[i - 1] = '\0';
245 		else
246 			break;
247 	}
248 
249 	aprint_normal_dev(&sc->sc_dev, "%s, Ethernet address %s\n",
250 	    devname, ether_sprintf(enaddr));
251 
252 	miiflags = 0;
253 	if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
254 		miiflags |= MIIF_HAVEFIBER;
255 
256 	/*
257 	 * Initialize our media structures and probe the MII.
258 	 */
259 	sc->sc_mii.mii_ifp = ifp;
260 	sc->sc_mii.mii_readreg = epic_mii_read;
261 	sc->sc_mii.mii_writereg = epic_mii_write;
262 	sc->sc_mii.mii_statchg = epic_statchg;
263 
264 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
265 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epic_mediachange,
266 	    ether_mediastatus);
267 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
268 	    MII_OFFSET_ANY, miiflags);
269 	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
270 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
271 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
272 	} else
273 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
274 
275 	if (sc->sc_hwflags & EPIC_HAS_BNC) {
276 		/* use the next free media instance */
277 		sc->sc_serinst = sc->sc_mii.mii_instance++;
278 		ifmedia_add(&sc->sc_mii.mii_media,
279 			    IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0,
280 					 sc->sc_serinst),
281 			    0, NULL);
282 		aprint_normal_dev(&sc->sc_dev, "10base2/BNC\n");
283 	} else
284 		sc->sc_serinst = -1;
285 
286 	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
287 	ifp->if_softc = sc;
288 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
289 	ifp->if_ioctl = epic_ioctl;
290 	ifp->if_start = epic_start;
291 	ifp->if_watchdog = epic_watchdog;
292 	ifp->if_init = epic_init;
293 	ifp->if_stop = epic_stop;
294 	IFQ_SET_READY(&ifp->if_snd);
295 
296 	/*
297 	 * We can support 802.1Q VLAN-sized frames.
298 	 */
299 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
300 
301 	/*
302 	 * Attach the interface.
303 	 */
304 	if_attach(ifp);
305 	ether_ifattach(ifp, enaddr);
306 
307 	/*
308 	 * Make sure the interface is shutdown during reboot.
309 	 */
310 	sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
311 	if (sc->sc_sdhook == NULL)
312 		aprint_error_dev(&sc->sc_dev, "WARNING: unable to establish shutdown hook\n");
313 	return;
314 
315 	/*
316 	 * Free any resources we've allocated during the failed attach
317 	 * attempt.  Do this in reverse order and fall through.
318 	 */
319  fail_6:
320 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
321  fail_5:
322 	for (i = 0; i < EPIC_NRXDESC; i++) {
323 		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
324 			bus_dmamap_destroy(sc->sc_dmat,
325 			    EPIC_DSRX(sc, i)->ds_dmamap);
326 	}
327  fail_4:
328 	for (i = 0; i < EPIC_NTXDESC; i++) {
329 		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
330 			bus_dmamap_destroy(sc->sc_dmat,
331 			    EPIC_DSTX(sc, i)->ds_dmamap);
332 	}
333 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
334  fail_3:
335 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
336  fail_2:
337 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
338 	    sizeof(struct epic_control_data));
339  fail_1:
340 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
341  fail_0:
342 	return;
343 }
344 
345 /*
346  * Shutdown hook.  Make sure the interface is stopped at reboot.
347  */
348 void
349 epic_shutdown(arg)
350 	void *arg;
351 {
352 	struct epic_softc *sc = arg;
353 
354 	epic_stop(&sc->sc_ethercom.ec_if, 1);
355 }
356 
357 /*
358  * Start packet transmission on the interface.
359  * [ifnet interface function]
360  */
void
epic_start(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;
	u_int len;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.  Only dequeued below,
		 * once we are sure we can transmit it.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		/* m tracks a private copy of the packet; NULL = none yet. */
		m = NULL;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.  We also copy when a short packet already uses
		 * all EPIC_NFRAGS segments, since one extra fragment is
		 * needed below for the pad buffer.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap-> dm_nsegs == EPIC_NFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);

			/* Copy the chain into a single contiguous mbuf. */
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				aprint_error_dev(&sc->sc_dev, "unable to allocate Tx mbuf\n");
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					aprint_error_dev(&sc->sc_dev, "unable to allocate Tx "
					    "cluster\n");
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				aprint_error_dev(&sc->sc_dev, "unable to load Tx buffer, "
				    "error = %d\n", error);
				break;
			}
		}
		/* Commit: remove the packet from the queue for good. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We made a copy; free the original chain. */
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the fraglist. */
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}
		len = m0->m_pkthdr.len;
		if (len < ETHER_PAD_LEN) {
			/*
			 * Pad short frames to the minimum Ethernet
			 * length with an extra fragment pointing at the
			 * pre-zeroed pad buffer.
			 */
			fr->ef_frags[seg].ef_addr = sc->sc_nulldma;
			fr->ef_frags[seg].ef_length = ETHER_PAD_LEN - len;
			len = ETHER_PAD_LEN;
			seg++;
		}
		fr->ef_nfrags = seg;

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = TXSTAT_TXLENGTH(len);
		else
			txd->et_txstatus =
			    TXSTAT_TXLENGTH(len) | ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus |= ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
543 
544 /*
545  * Watchdog timer handler.
546  * [ifnet interface function]
547  */
548 void
549 epic_watchdog(ifp)
550 	struct ifnet *ifp;
551 {
552 	struct epic_softc *sc = ifp->if_softc;
553 
554 	printf("%s: device timeout\n", device_xname(&sc->sc_dev));
555 	ifp->if_oerrors++;
556 
557 	(void) epic_init(ifp);
558 }
559 
560 /*
561  * Handle control requests from the operator.
562  * [ifnet interface function]
563  */
564 int
565 epic_ioctl(ifp, cmd, data)
566 	struct ifnet *ifp;
567 	u_long cmd;
568 	void *data;
569 {
570 	struct epic_softc *sc = ifp->if_softc;
571 	int s, error;
572 
573 	s = splnet();
574 
575 	error = ether_ioctl(ifp, cmd, data);
576 	if (error == ENETRESET) {
577 		/*
578 		 * Multicast list has changed; set the hardware filter
579 		 * accordingly.  Update our idea of the current media;
580 		 * epic_set_mchash() needs to know what it is.
581 		 */
582 		if (ifp->if_flags & IFF_RUNNING) {
583 			mii_pollstat(&sc->sc_mii);
584 			epic_set_mchash(sc);
585 		}
586 		error = 0;
587 	}
588 
589 	splx(s);
590 	return (error);
591 }
592 
593 /*
594  * Interrupt handler.
595  */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	uint32_t intstat, rxstatus, txstatus;
	int i, claimed = 0;	/* claimed = 1 once we've seen our interrupt */
	u_int len;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			rxstatus = rxd->er_rxstatus;
			if (rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxstatus & ER_RXSTAT_CRCERROR)
					aprint_error_dev(&sc->sc_dev, "CRC error\n");
				if (rxstatus & ER_RXSTAT_ALIGNERROR)
					aprint_error_dev(&sc->sc_dev, "alignment error\n");
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * trim it.
			 */
			len = RXSTAT_RXLENGTH(rxstatus) - ETHER_CRC_LEN;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, void *),
				    mtod(ds->ds_mbuf, void *), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if it's for us.
			 */
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif

			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			aprint_error_dev(&sc->sc_dev, "receiver queue empty\n");
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Reap completed descriptors from txdirty forward. */
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			txstatus = txd->et_txstatus;
			if (txstatus & ET_TXSTAT_OWNER)
				break;	/* chip still owns it; stop here */

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txstatus);
			if (txstatus & ET_TXSTAT_CARSENSELOST)
				aprint_error_dev(&sc->sc_dev, "lost carrier\n");
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			aprint_error_dev(&sc->sc_dev, "transmit underrun\n");
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			aprint_error_dev(&sc->sc_dev, "PCI target abort error\n");
		else if (intstat & INTSTAT_PMA)
			aprint_error_dev(&sc->sc_dev, "PCI master abort error\n");
		else if (intstat & INTSTAT_APE)
			aprint_error_dev(&sc->sc_dev, "PCI address parity error\n");
		else if (intstat & INTSTAT_DPE)
			aprint_error_dev(&sc->sc_dev, "PCI data parity error\n");
		else
			aprint_error_dev(&sc->sc_dev, "unknown fatal error\n");
		(void) epic_init(ifp);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
843 
844 /*
845  * One second timer, used to tick the MII.
846  */
847 void
848 epic_tick(arg)
849 	void *arg;
850 {
851 	struct epic_softc *sc = arg;
852 	int s;
853 
854 	s = splnet();
855 	mii_tick(&sc->sc_mii);
856 	splx(s);
857 
858 	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
859 }
860 
861 /*
862  * Fixup the clock source on the EPIC.
863  */
864 void
865 epic_fixup_clock_source(sc)
866 	struct epic_softc *sc;
867 {
868 	int i;
869 
870 	/*
871 	 * According to SMC Application Note 7-15, the EPIC's clock
872 	 * source is incorrect following a reset.  This manifests itself
873 	 * as failure to recognize when host software has written to
874 	 * a register on the EPIC.  The appnote recommends issuing at
875 	 * least 16 consecutive writes to the CLOCK TEST bit to correctly
876 	 * configure the clock source.
877 	 */
878 	for (i = 0; i < 16; i++)
879 		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
880 		    TEST_CLOCKTEST);
881 }
882 
883 /*
884  * Perform a soft reset on the EPIC.
885  */
886 void
887 epic_reset(sc)
888 	struct epic_softc *sc;
889 {
890 
891 	epic_fixup_clock_source(sc);
892 
893 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
894 	delay(100);
895 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
896 	delay(100);
897 
898 	epic_fixup_clock_source(sc);
899 }
900 
901 /*
902  * Initialize the interface.  Must be called at splnet().
903  */
int
epic_init(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	const uint8_t *enaddr = CLLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	uint32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(ifp, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.  GPIO1 is toggled via NVCTL around
	 * the PHY-reset pulse, then restored.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(1000);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address (written as three little-endian
	 * 16-bit words into LAN0..LAN2).
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	if ((error = epic_mediachange(ifp)) != 0)
		goto out;

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring, allocating mbufs for
	 * any slots that do not already have one.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(&sc->sc_dev, "unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		} else
			EPIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		aprint_error_dev(&sc->sc_dev, "interface not running\n");
	return (error);
}
1063 
1064 /*
1065  * Drain the receive queue.
1066  */
1067 void
1068 epic_rxdrain(sc)
1069 	struct epic_softc *sc;
1070 {
1071 	struct epic_descsoft *ds;
1072 	int i;
1073 
1074 	for (i = 0; i < EPIC_NRXDESC; i++) {
1075 		ds = EPIC_DSRX(sc, i);
1076 		if (ds->ds_mbuf != NULL) {
1077 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1078 			m_freem(ds->ds_mbuf);
1079 			ds->ds_mbuf = NULL;
1080 		}
1081 	}
1082 }
1083 
1084 /*
1085  * Stop transmission on the interface.
1086  */
1087 void
1088 epic_stop(ifp, disable)
1089 	struct ifnet *ifp;
1090 	int disable;
1091 {
1092 	struct epic_softc *sc = ifp->if_softc;
1093 	bus_space_tag_t st = sc->sc_st;
1094 	bus_space_handle_t sh = sc->sc_sh;
1095 	struct epic_descsoft *ds;
1096 	uint32_t reg;
1097 	int i;
1098 
1099 	/*
1100 	 * Stop the one second clock.
1101 	 */
1102 	callout_stop(&sc->sc_mii_callout);
1103 
1104 	/* Down the MII. */
1105 	mii_down(&sc->sc_mii);
1106 
1107 	/* Paranoia... */
1108 	epic_fixup_clock_source(sc);
1109 
1110 	/*
1111 	 * Disable interrupts.
1112 	 */
1113 	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
1114 	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
1115 	bus_space_write_4(st, sh, EPIC_INTMASK, 0);
1116 
1117 	/*
1118 	 * Stop the DMA engine and take the receiver off-line.
1119 	 */
1120 	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
1121 	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);
1122 
1123 	/*
1124 	 * Release any queued transmit buffers.
1125 	 */
1126 	for (i = 0; i < EPIC_NTXDESC; i++) {
1127 		ds = EPIC_DSTX(sc, i);
1128 		if (ds->ds_mbuf != NULL) {
1129 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1130 			m_freem(ds->ds_mbuf);
1131 			ds->ds_mbuf = NULL;
1132 		}
1133 	}
1134 
1135 	/*
1136 	 * Mark the interface down and cancel the watchdog timer.
1137 	 */
1138 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1139 	ifp->if_timer = 0;
1140 
1141 	if (disable)
1142 		epic_rxdrain(sc);
1143 }
1144 
1145 /*
1146  * Read the EPIC Serial EEPROM.
1147  */
1148 void
1149 epic_read_eeprom(sc, word, wordcnt, data)
1150 	struct epic_softc *sc;
1151 	int word, wordcnt;
1152 	uint16_t *data;
1153 {
1154 	bus_space_tag_t st = sc->sc_st;
1155 	bus_space_handle_t sh = sc->sc_sh;
1156 	uint16_t reg;
1157 	int i, x;
1158 
1159 #define	EEPROM_WAIT_READY(st, sh) \
1160 	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
1161 		/* nothing */
1162 
1163 	/*
1164 	 * Enable the EEPROM.
1165 	 */
1166 	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1167 	EEPROM_WAIT_READY(st, sh);
1168 
1169 	for (i = 0; i < wordcnt; i++) {
1170 		/* Send CHIP SELECT for one clock tick. */
1171 		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
1172 		EEPROM_WAIT_READY(st, sh);
1173 
1174 		/* Shift in the READ opcode. */
1175 		for (x = 3; x > 0; x--) {
1176 			reg = EECTL_ENABLE|EECTL_EECS;
1177 			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
1178 				reg |= EECTL_EEDI;
1179 			bus_space_write_4(st, sh, EPIC_EECTL, reg);
1180 			EEPROM_WAIT_READY(st, sh);
1181 			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1182 			EEPROM_WAIT_READY(st, sh);
1183 			bus_space_write_4(st, sh, EPIC_EECTL, reg);
1184 			EEPROM_WAIT_READY(st, sh);
1185 		}
1186 
1187 		/* Shift in address. */
1188 		for (x = 6; x > 0; x--) {
1189 			reg = EECTL_ENABLE|EECTL_EECS;
1190 			if ((word + i) & (1 << (x - 1)))
1191 				reg |= EECTL_EEDI;
1192 			bus_space_write_4(st, sh, EPIC_EECTL, reg);
1193 			EEPROM_WAIT_READY(st, sh);
1194 			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1195 			EEPROM_WAIT_READY(st, sh);
1196 			bus_space_write_4(st, sh, EPIC_EECTL, reg);
1197 			EEPROM_WAIT_READY(st, sh);
1198 		}
1199 
1200 		/* Shift out data. */
1201 		reg = EECTL_ENABLE|EECTL_EECS;
1202 		data[i] = 0;
1203 		for (x = 16; x > 0; x--) {
1204 			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1205 			EEPROM_WAIT_READY(st, sh);
1206 			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
1207 				data[i] |= (1 << (x - 1));
1208 			bus_space_write_4(st, sh, EPIC_EECTL, reg);
1209 			EEPROM_WAIT_READY(st, sh);
1210 		}
1211 
1212 		/* Clear CHIP SELECT. */
1213 		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1214 		EEPROM_WAIT_READY(st, sh);
1215 	}
1216 
1217 	/*
1218 	 * Disable the EEPROM.
1219 	 */
1220 	bus_space_write_4(st, sh, EPIC_EECTL, 0);
1221 
1222 #undef EEPROM_WAIT_READY
1223 }
1224 
1225 /*
1226  * Add a receive buffer to the indicated descriptor.
1227  */
1228 int
1229 epic_add_rxbuf(sc, idx)
1230 	struct epic_softc *sc;
1231 	int idx;
1232 {
1233 	struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
1234 	struct mbuf *m;
1235 	int error;
1236 
1237 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1238 	if (m == NULL)
1239 		return (ENOBUFS);
1240 
1241 	MCLGET(m, M_DONTWAIT);
1242 	if ((m->m_flags & M_EXT) == 0) {
1243 		m_freem(m);
1244 		return (ENOBUFS);
1245 	}
1246 
1247 	if (ds->ds_mbuf != NULL)
1248 		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1249 
1250 	ds->ds_mbuf = m;
1251 
1252 	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1253 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1254 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1255 	if (error) {
1256 		aprint_error_dev(&sc->sc_dev, "can't load rx DMA map %d, error = %d\n",
1257 		    idx, error);
1258 		panic("epic_add_rxbuf");	/* XXX */
1259 	}
1260 
1261 	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1262 	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1263 
1264 	EPIC_INIT_RXDESC(sc, idx);
1265 
1266 	return (0);
1267 }
1268 
1269 /*
1270  * Set the EPIC multicast hash table.
1271  *
1272  * NOTE: We rely on a recently-updated mii_media_active here!
1273  */
1274 void
1275 epic_set_mchash(sc)
1276 	struct epic_softc *sc;
1277 {
1278 	struct ethercom *ec = &sc->sc_ethercom;
1279 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1280 	struct ether_multi *enm;
1281 	struct ether_multistep step;
1282 	uint32_t hash, mchash[4];
1283 
1284 	/*
1285 	 * Set up the multicast address filter by passing all multicast
1286 	 * addresses through a CRC generator, and then using the low-order
1287 	 * 6 bits as an index into the 64 bit multicast hash table (only
1288 	 * the lower 16 bits of each 32 bit multicast hash register are
1289 	 * valid).  The high order bits select the register, while the
1290 	 * rest of the bits select the bit within the register.
1291 	 */
1292 
1293 	if (ifp->if_flags & IFF_PROMISC)
1294 		goto allmulti;
1295 
1296 	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
1297 		/* XXX hardware bug in 10Mbps mode. */
1298 		goto allmulti;
1299 	}
1300 
1301 	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1302 
1303 	ETHER_FIRST_MULTI(step, ec, enm);
1304 	while (enm != NULL) {
1305 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1306 			/*
1307 			 * We must listen to a range of multicast addresses.
1308 			 * For now, just accept all multicasts, rather than
1309 			 * trying to set only those filter bits needed to match
1310 			 * the range.  (At this time, the only use of address
1311 			 * ranges is for IP multicast routing, for which the
1312 			 * range is big enough to require all bits set.)
1313 			 */
1314 			goto allmulti;
1315 		}
1316 
1317 		hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1318 		hash >>= 26;
1319 
1320 		/* Set the corresponding bit in the hash table. */
1321 		mchash[hash >> 4] |= 1 << (hash & 0xf);
1322 
1323 		ETHER_NEXT_MULTI(step, enm);
1324 	}
1325 
1326 	ifp->if_flags &= ~IFF_ALLMULTI;
1327 	goto sethash;
1328 
1329  allmulti:
1330 	ifp->if_flags |= IFF_ALLMULTI;
1331 	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1332 
1333  sethash:
1334 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1335 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1336 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1337 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1338 }
1339 
1340 /*
1341  * Wait for the MII to become ready.
1342  */
1343 int
1344 epic_mii_wait(sc, rw)
1345 	struct epic_softc *sc;
1346 	uint32_t rw;
1347 {
1348 	int i;
1349 
1350 	for (i = 0; i < 50; i++) {
1351 		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1352 		    == 0)
1353 			break;
1354 		delay(2);
1355 	}
1356 	if (i == 50) {
1357 		aprint_error_dev(&sc->sc_dev, "MII timed out\n");
1358 		return (1);
1359 	}
1360 
1361 	return (0);
1362 }
1363 
1364 /*
1365  * Read from the MII.
1366  */
1367 int
1368 epic_mii_read(self, phy, reg)
1369 	struct device *self;
1370 	int phy, reg;
1371 {
1372 	struct epic_softc *sc = (struct epic_softc *)self;
1373 
1374 	if (epic_mii_wait(sc, MMCTL_WRITE))
1375 		return (0);
1376 
1377 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1378 	    MMCTL_ARG(phy, reg, MMCTL_READ));
1379 
1380 	if (epic_mii_wait(sc, MMCTL_READ))
1381 		return (0);
1382 
1383 	return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1384 	    MMDATA_MASK);
1385 }
1386 
1387 /*
1388  * Write to the MII.
1389  */
1390 void
1391 epic_mii_write(self, phy, reg, val)
1392 	struct device *self;
1393 	int phy, reg, val;
1394 {
1395 	struct epic_softc *sc = (struct epic_softc *)self;
1396 
1397 	if (epic_mii_wait(sc, MMCTL_WRITE))
1398 		return;
1399 
1400 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1401 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1402 	    MMCTL_ARG(phy, reg, MMCTL_WRITE));
1403 }
1404 
1405 /*
1406  * Callback from PHY when media changes.
1407  */
1408 void
1409 epic_statchg(self)
1410 	struct device *self;
1411 {
1412 	struct epic_softc *sc = (struct epic_softc *)self;
1413 	uint32_t txcon, miicfg;
1414 
1415 	/*
1416 	 * Update loopback bits in TXCON to reflect duplex mode.
1417 	 */
1418 	txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1419 	if (sc->sc_mii.mii_media_active & IFM_FDX)
1420 		txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1421 	else
1422 		txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1423 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1424 
1425 	/* On some cards we need manualy set fullduplex led */
1426 	if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) {
1427 		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1428 		if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX)
1429 			miicfg |= MIICFG_ENABLE;
1430 		else
1431 			miicfg &= ~MIICFG_ENABLE;
1432 		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1433 	}
1434 
1435 	/*
1436 	 * There is a multicast filter bug in 10Mbps mode.  Kick the
1437 	 * multicast filter in case the speed changed.
1438 	 */
1439 	epic_set_mchash(sc);
1440 }
1441 
1442 /*
1443  * Callback from ifmedia to request new media setting.
1444  *
1445  * XXX Looks to me like some of this complexity should move into
1446  * XXX one or two custom PHY drivers. --dyoung
1447  */
1448 int
1449 epic_mediachange(ifp)
1450 	struct ifnet *ifp;
1451 {
1452 	struct epic_softc *sc = ifp->if_softc;
1453 	struct mii_data *mii = &sc->sc_mii;
1454 	struct ifmedia *ifm = &mii->mii_media;
1455 	int media = ifm->ifm_cur->ifm_media;
1456 	uint32_t miicfg;
1457 	struct mii_softc *miisc;
1458 	int cfg, rc;
1459 
1460 	if ((ifp->if_flags & IFF_UP) == 0)
1461 		return (0);
1462 
1463 	if (IFM_INST(media) != sc->sc_serinst) {
1464 		/* If we're not selecting serial interface, select MII mode */
1465 #ifdef EPICMEDIADEBUG
1466 		printf("%s: parallel mode\n", ifp->if_xname);
1467 #endif
1468 		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1469 		miicfg &= ~MIICFG_SERMODEENA;
1470 		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1471 	}
1472 
1473 	if ((rc = mii_mediachg(mii)) == ENXIO)
1474 		rc = 0;
1475 
1476 	if (IFM_INST(media) == sc->sc_serinst) {
1477 		/* select serial interface */
1478 #ifdef EPICMEDIADEBUG
1479 		printf("%s: serial mode\n", ifp->if_xname);
1480 #endif
1481 		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1482 		miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE);
1483 		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1484 
1485 		/* There is no driver to fill this */
1486 		mii->mii_media_active = media;
1487 		mii->mii_media_status = 0;
1488 
1489 		epic_statchg(&sc->sc_dev);
1490 		return (0);
1491 	}
1492 
1493 	/* Lookup selected PHY */
1494 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
1495 		if (IFM_INST(media) == miisc->mii_inst)
1496 			break;
1497 	}
1498 	if (!miisc) {
1499 		printf("epic_mediachange: can't happen\n"); /* ??? panic */
1500 		return (0);
1501 	}
1502 #ifdef EPICMEDIADEBUG
1503 	printf("%s: using phy %s\n", ifp->if_xname,
1504 	       device_xname(&miisc->mii_dev));
1505 #endif
1506 
1507 	if (miisc->mii_flags & MIIF_HAVEFIBER) {
1508 		/* XXX XXX assume it's a Level1 - should check */
1509 
1510 		/* We have to powerup fiber transceivers */
1511 		cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG);
1512 		if (IFM_SUBTYPE(media) == IFM_100_FX) {
1513 #ifdef EPICMEDIADEBUG
1514 			printf("%s: power up fiber\n", ifp->if_xname);
1515 #endif
1516 			cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0);
1517 		} else {
1518 #ifdef EPICMEDIADEBUG
1519 			printf("%s: power down fiber\n", ifp->if_xname);
1520 #endif
1521 			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
1522 		}
1523 		PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg);
1524 	}
1525 
1526 	return rc;
1527 }
1528