1 /*	$OpenBSD: dc.c,v 1.65 2003/10/21 18:58:49 jmc Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_dc.c,v 1.43 2001/01/19 23:55:07 wpaul Exp $
35  */
36 
37 /*
38  * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
39  * series chips and several workalikes including the following:
40  *
41  * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
42  * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
43  * Lite-On 82c168/82c169 PNIC (www.litecom.com)
44  * ASIX Electronics AX88140A (www.asix.com.tw)
45  * ASIX Electronics AX88141 (www.asix.com.tw)
46  * ADMtek AL981 (www.admtek.com.tw)
47  * ADMtek AN983 (www.admtek.com.tw)
48  * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
49  * Accton EN1217, EN2242 (www.accton.com)
50  * Xircom X3201 (www.xircom.com)
51  *
52  * Datasheets for the 21143 are available at developer.intel.com.
53  * Datasheets for the clone parts can be found at their respective sites.
54  * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
55  * The PNIC II is essentially a Macronix 98715A chip; the only difference
56  * worth noting is that its multicast hash table is only 128 bits wide
57  * instead of 512.
58  *
59  * Written by Bill Paul <wpaul@ee.columbia.edu>
60  * Electrical Engineering Department
61  * Columbia University, New York City
62  */
63 
64 /*
65  * The Intel 21143 is the successor to the DEC 21140. It is basically
66  * the same as the 21140 but with a few new features. The 21143 supports
67  * three kinds of media attachments:
68  *
69  * o MII port, for 10Mbps and 100Mbps support and NWAY
70  *   autonegotiation provided by an external PHY.
71  * o SYM port, for symbol mode 100Mbps support.
72  * o 10baseT port.
73  * o AUI/BNC port.
74  *
75  * The 100Mbps SYM port and 10baseT port can be used together in
76  * combination with the internal NWAY support to create a 10/100
77  * autosensing configuration.
78  *
79  * Note that not all tulip workalikes are handled in this driver: we only
80  * deal with those which are relatively well behaved. The Winbond is
81  * handled separately due to its different register offsets and the
82  * special handling needed for its various bugs. The PNIC is handled
83  * here, but I'm not thrilled about it.
84  *
85  * All of the workalike chips use some form of MII transceiver support
86  * with the exception of the Macronix chips, which also have a SYM port.
87  * The ASIX AX88140A is also documented to have a SYM port, but all
88  * the cards I've seen use an MII transceiver, probably because the
89  * AX88140A doesn't support internal NWAY.
90  */
91 
92 #include "bpfilter.h"
93 #include "vlan.h"
94 
95 #include <sys/param.h>
96 #include <sys/systm.h>
97 #include <sys/mbuf.h>
98 #include <sys/protosw.h>
99 #include <sys/socket.h>
100 #include <sys/ioctl.h>
101 #include <sys/errno.h>
102 #include <sys/malloc.h>
103 #include <sys/kernel.h>
104 #include <sys/device.h>
105 #include <sys/timeout.h>
106 
107 #include <net/if.h>
108 #include <net/if_dl.h>
109 #include <net/if_types.h>
110 
111 #ifdef INET
112 #include <netinet/in.h>
113 #include <netinet/in_systm.h>
114 #include <netinet/in_var.h>
115 #include <netinet/ip.h>
116 #include <netinet/if_ether.h>
117 #endif
118 
119 #include <net/if_media.h>
120 
121 #if NBPFILTER > 0
122 #include <net/bpf.h>
123 #endif
124 
125 #include <dev/mii/mii.h>
126 #include <dev/mii/miivar.h>
127 
128 #include <machine/bus.h>
129 #include <dev/pci/pcidevs.h>
130 
131 #include <dev/ic/dcreg.h>
132 
133 int dc_intr(void *);
134 void dc_shutdown(void *);
135 struct dc_type *dc_devtype(void *);
136 int dc_newbuf(struct dc_softc *, int, struct mbuf *);
137 int dc_encap(struct dc_softc *, struct mbuf *, u_int32_t *);
138 int dc_coal(struct dc_softc *, struct mbuf **);
139 
140 void dc_pnic_rx_bug_war(struct dc_softc *, int);
141 int dc_rx_resync(struct dc_softc *);
142 void dc_rxeof(struct dc_softc *);
143 void dc_txeof(struct dc_softc *);
144 void dc_tick(void *);
145 void dc_start(struct ifnet *);
146 int dc_ioctl(struct ifnet *, u_long, caddr_t);
147 void dc_init(void *);
148 void dc_stop(struct dc_softc *);
149 void dc_watchdog(struct ifnet *);
150 int dc_ifmedia_upd(struct ifnet *);
151 void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);
152 
153 void dc_delay(struct dc_softc *);
154 void dc_eeprom_width(struct dc_softc *);
155 void dc_eeprom_idle(struct dc_softc *);
156 void dc_eeprom_putbyte(struct dc_softc *, int);
157 void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
158 void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
159 void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);
160 
161 void dc_mii_writebit(struct dc_softc *, int);
162 int dc_mii_readbit(struct dc_softc *);
163 void dc_mii_sync(struct dc_softc *);
164 void dc_mii_send(struct dc_softc *, u_int32_t, int);
165 int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
166 int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
167 int dc_miibus_readreg(struct device *, int, int);
168 void dc_miibus_writereg(struct device *, int, int, int);
169 void dc_miibus_statchg(struct device *);
170 
171 void dc_setcfg(struct dc_softc *, int);
172 u_int32_t dc_crc_le(struct dc_softc *, caddr_t);
173 u_int32_t dc_crc_be(caddr_t);
174 void dc_setfilt_21143(struct dc_softc *);
175 void dc_setfilt_asix(struct dc_softc *);
176 void dc_setfilt_admtek(struct dc_softc *);
177 void dc_setfilt_xircom(struct dc_softc *);
178 
179 void dc_setfilt(struct dc_softc *);
180 
181 void dc_reset(struct dc_softc *);
182 int dc_list_rx_init(struct dc_softc *);
183 int dc_list_tx_init(struct dc_softc *);
184 
185 void dc_read_srom(struct dc_softc *, int);
186 void dc_parse_21143_srom(struct dc_softc *);
187 void dc_decode_leaf_sia(struct dc_softc *,
188 				     struct dc_eblock_sia *);
189 void dc_decode_leaf_mii(struct dc_softc *,
190 				     struct dc_eblock_mii *);
191 void dc_decode_leaf_sym(struct dc_softc *,
192 				     struct dc_eblock_sym *);
193 void dc_apply_fixup(struct dc_softc *, int);
194 
195 #define DC_SETBIT(sc, reg, x)				\
196 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
197 
198 #define DC_CLRBIT(sc, reg, x)				\
199 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
200 
201 #define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
202 #define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))
203 
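/*
 * Spin by issuing dummy reads of the bus-control register; the 300/33
 * constant suggests the intent is a delay of roughly 300 ns assuming one
 * 33 MHz PCI cycle per read. This paces the bit-banged serial EEPROM and
 * MII lines driven through the SIO (CSR9) register below.
 */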
204 void
205 dc_delay(sc)
206 	struct dc_softc *sc;
207 {
208 	int idx;
209 
210 	for (idx = (300 / 33) + 1; idx > 0; idx--)
211 		CSR_READ_4(sc, DC_BUSCTL);
212 }
213 
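/*
 * Probe the width of the serial EEPROM's address field. After clocking
 * out what appears to be the standard 93Cxx READ opcode (binary 110, the
 * constant 6 below), zero address bits are clocked in until the EEPROM
 * pulls its data-out line low (the dummy bit that precedes read data),
 * which marks the end of the address field. The number of clocks needed
 * is the address width; implausible results fall back to a width of 6
 * (a 93C46-sized part).
 */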
214 void
215 dc_eeprom_width(sc)
216 	struct dc_softc *sc;
217 {
218 	int i;
219 
220 	/* Force EEPROM to idle state. */
221 	dc_eeprom_idle(sc);
222 
223 	/* Enter EEPROM access mode. */
224 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
225 	dc_delay(sc);
226 	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
227 	dc_delay(sc);
228 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
229 	dc_delay(sc);
230 	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
231 	dc_delay(sc);
232 
233 	for (i = 3; i--;) {
234 		if (6 & (1 << i))
235 			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
236 		else
237 			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
238 		dc_delay(sc);
239 		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
240 		dc_delay(sc);
241 		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
242 		dc_delay(sc);
243 	}
244 
245 	for (i = 1; i <= 12; i++) {
246 		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
247 		dc_delay(sc);
248 		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
249 			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
250 			dc_delay(sc);
251 			break;
252 		}
253 		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
254 		dc_delay(sc);
255 	}
256 
257 	/* Turn off EEPROM access mode. */
258 	dc_eeprom_idle(sc);
259 
260 	if (i < 4 || i > 12)
261 		sc->dc_romwidth = 6;
262 	else
263 		sc->dc_romwidth = i;
264 
265 	/* Enter EEPROM access mode. */
266 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
267 	dc_delay(sc);
268 	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
269 	dc_delay(sc);
270 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
271 	dc_delay(sc);
272 	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
273 	dc_delay(sc);
274 
275 	/* Turn off EEPROM access mode. */
276 	dc_eeprom_idle(sc);
277 }
278 
279 void
280 dc_eeprom_idle(sc)
281 	struct dc_softc *sc;
282 {
283 	int i;
284 
285 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
286 	dc_delay(sc);
287 	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
288 	dc_delay(sc);
289 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
290 	dc_delay(sc);
291 	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
292 	dc_delay(sc);
293 
294 	for (i = 0; i < 25; i++) {
295 		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
296 		dc_delay(sc);
297 		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
298 		dc_delay(sc);
299 	}
300 
301 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
302 	dc_delay(sc);
303 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
304 	dc_delay(sc);
305 	CSR_WRITE_4(sc, DC_SIO, 0x00000000);
306 }
307 
308 /*
309  * Send a read command and address to the EEPROM.
310  */
311 void
312 dc_eeprom_putbyte(sc, addr)
313 	struct dc_softc *sc;
314 	int addr;
315 {
316 	int d, i;
317 
318 	d = DC_EECMD_READ >> 6;
319 
320 	for (i = 3; i--; ) {
321 		if (d & (1 << i))
322 			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
323 		else
324 			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
325 		dc_delay(sc);
326 		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
327 		dc_delay(sc);
328 		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
329 		dc_delay(sc);
330 	}
331 
332 	/*
333 	 * Feed in each bit and strobe the clock.
334 	 */
335 	for (i = sc->dc_romwidth; i--;) {
336 		if (addr & (1 << i)) {
337 			SIO_SET(DC_SIO_EE_DATAIN);
338 		} else {
339 			SIO_CLR(DC_SIO_EE_DATAIN);
340 		}
341 		dc_delay(sc);
342 		SIO_SET(DC_SIO_EE_CLK);
343 		dc_delay(sc);
344 		SIO_CLR(DC_SIO_EE_CLK);
345 		dc_delay(sc);
346 	}
347 }
348 
349 /*
350  * Read a word of data stored in the EEPROM at address 'addr.'
351  * The PNIC 82c168/82c169 has its own non-standard way to read
352  * the EEPROM.
353  */
354 void
355 dc_eeprom_getword_pnic(sc, addr, dest)
356 	struct dc_softc *sc;
357 	int addr;
358 	u_int16_t *dest;
359 {
360 	int i;
361 	u_int32_t r;
362 
363 	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr);
364 
365 	for (i = 0; i < DC_TIMEOUT; i++) {
366 		DELAY(1);
367 		r = CSR_READ_4(sc, DC_SIO);
368 		if (!(r & DC_PN_SIOCTL_BUSY)) {
369 			*dest = (u_int16_t)(r & 0xFFFF);
370 			return;
371 		}
372 	}
373 }
374 
375 /*
376  * Read a word of data stored in the EEPROM at address 'addr.'
377  */
378 void
379 dc_eeprom_getword(sc, addr, dest)
380 	struct dc_softc *sc;
381 	int addr;
382 	u_int16_t *dest;
383 {
384 	int i;
385 	u_int16_t word = 0;
386 
387 	/* Force EEPROM to idle state. */
388 	dc_eeprom_idle(sc);
389 
390 	/* Enter EEPROM access mode. */
391 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
392 	dc_delay(sc);
393 	DC_SETBIT(sc, DC_SIO,  DC_SIO_ROMCTL_READ);
394 	dc_delay(sc);
395 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
396 	dc_delay(sc);
397 	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
398 	dc_delay(sc);
399 
400 	/*
401 	 * Send address of word we want to read.
402 	 */
403 	dc_eeprom_putbyte(sc, addr);
404 
405 	/*
406 	 * Start reading bits from EEPROM.
407 	 */
408 	for (i = 0x8000; i; i >>= 1) {
409 		SIO_SET(DC_SIO_EE_CLK);
410 		dc_delay(sc);
411 		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
412 			word |= i;
413 		dc_delay(sc);
414 		SIO_CLR(DC_SIO_EE_CLK);
415 		dc_delay(sc);
416 	}
417 
418 	/* Turn off EEPROM access mode. */
419 	dc_eeprom_idle(sc);
420 
421 	*dest = word;
422 }
423 
424 /*
425  * Read a sequence of words from the EEPROM.
426  */
427 void dc_read_eeprom(sc, dest, off, cnt, swap)
428 	struct dc_softc *sc;
429 	caddr_t dest;
430 	int off, cnt, swap;
431 {
432 	int i;
433 	u_int16_t word = 0, *ptr;
434 
435 	for (i = 0; i < cnt; i++) {
436 		if (DC_IS_PNIC(sc))
437 			dc_eeprom_getword_pnic(sc, off + i, &word);
438 		else
439 			dc_eeprom_getword(sc, off + i, &word);
440 		ptr = (u_int16_t *)(dest + (i * 2));
441 		if (swap)
442 			*ptr = betoh16(word);
443 		else
444 			*ptr = letoh16(word);
445 	}
446 }
447 
448 /*
449  * The following two routines are taken from the Macronix 98713
450  * Application Notes pp.19-21.
451  */
452 /*
453  * Write a bit to the MII bus.
454  */
455 void
456 dc_mii_writebit(sc, bit)
457 	struct dc_softc *sc;
458 	int bit;
459 {
460 	if (bit)
461 		CSR_WRITE_4(sc, DC_SIO,
462 		    DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
463 	else
464 		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
465 
466 	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
467 	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
468 }
469 
470 /*
471  * Read a bit from the MII bus.
472  */
473 int
474 dc_mii_readbit(sc)
475 	struct dc_softc *sc;
476 {
477 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
478 	CSR_READ_4(sc, DC_SIO);
479 	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
480 	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
481 	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
482 		return (1);
483 	return (0);
484 }
485 
486 /*
487  * Sync the PHYs by setting data bit and strobing the clock 32 times.
488  */
489 void
490 dc_mii_sync(sc)
491 	struct dc_softc *sc;
492 {
493 	int i;
494 
495 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
496 
497 	for (i = 0; i < 32; i++)
498 		dc_mii_writebit(sc, 1);
499 }
500 
501 /*
502  * Clock a series of bits through the MII.
503  */
504 void
505 dc_mii_send(sc, bits, cnt)
506 	struct dc_softc *sc;
507 	u_int32_t bits;
508 	int cnt;
509 {
510 	int i;
511 
512 	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
513 		dc_mii_writebit(sc, bits & i);
514 }
515 
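/*
 * The two routines below bit-bang standard IEEE 802.3 clause 22
 * management frames: a 32-bit preamble (dc_mii_sync), a 2-bit start
 * delimiter, a 2-bit opcode, 5 bits of PHY address, 5 bits of register
 * address, a 2-bit turnaround and 16 bits of data, most significant
 * bit first.
 */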
516 /*
517  * Read a PHY register through the MII.
518  */
519 int
520 dc_mii_readreg(sc, frame)
521 	struct dc_softc *sc;
522 	struct dc_mii_frame *frame;
523 {
524 	int i, ack, s;
525 
526 	s = splimp();
527 
528 	/*
529 	 * Set up frame for RX.
530 	 */
531 	frame->mii_stdelim = DC_MII_STARTDELIM;
532 	frame->mii_opcode = DC_MII_READOP;
533 	frame->mii_turnaround = 0;
534 	frame->mii_data = 0;
535 
536 	/*
537 	 * Sync the PHYs.
538 	 */
539 	dc_mii_sync(sc);
540 
541 	/*
542 	 * Send command/address info.
543 	 */
544 	dc_mii_send(sc, frame->mii_stdelim, 2);
545 	dc_mii_send(sc, frame->mii_opcode, 2);
546 	dc_mii_send(sc, frame->mii_phyaddr, 5);
547 	dc_mii_send(sc, frame->mii_regaddr, 5);
548 
549 #ifdef notdef
550 	/* Idle bit */
551 	dc_mii_writebit(sc, 1);
552 	dc_mii_writebit(sc, 0);
553 #endif
554 
555 	/* Check for ack */
556 	ack = dc_mii_readbit(sc);
557 
558 	/*
559 	 * Now try reading data bits. If the ack failed, we still
560 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
561 	 */
562 	if (ack) {
563 		for(i = 0; i < 16; i++) {
564 			dc_mii_readbit(sc);
565 		}
566 		goto fail;
567 	}
568 
569 	for (i = 0x8000; i; i >>= 1) {
570 		if (dc_mii_readbit(sc))
571 			frame->mii_data |= i;
572 	}
573 
574 fail:
575 
576 	dc_mii_writebit(sc, 0);
577 	dc_mii_writebit(sc, 0);
578 
579 	splx(s);
580 
581 	if (ack)
582 		return (1);
583 	return (0);
584 }
585 
586 /*
587  * Write to a PHY register through the MII.
588  */
589 int
590 dc_mii_writereg(sc, frame)
591 	struct dc_softc *sc;
592 	struct dc_mii_frame *frame;
593 {
594 	int s;
595 
596 	s = splimp();
597 	/*
598 	 * Set up frame for TX.
599 	 */
600 
601 	frame->mii_stdelim = DC_MII_STARTDELIM;
602 	frame->mii_opcode = DC_MII_WRITEOP;
603 	frame->mii_turnaround = DC_MII_TURNAROUND;
604 
605 	/*
606 	 * Sync the PHYs.
607 	 */
608 	dc_mii_sync(sc);
609 
610 	dc_mii_send(sc, frame->mii_stdelim, 2);
611 	dc_mii_send(sc, frame->mii_opcode, 2);
612 	dc_mii_send(sc, frame->mii_phyaddr, 5);
613 	dc_mii_send(sc, frame->mii_regaddr, 5);
614 	dc_mii_send(sc, frame->mii_turnaround, 2);
615 	dc_mii_send(sc, frame->mii_data, 16);
616 
617 	/* Idle bit. */
618 	dc_mii_writebit(sc, 0);
619 	dc_mii_writebit(sc, 0);
620 
621 	splx(s);
622 	return (0);
623 }
624 
625 int
626 dc_miibus_readreg(self, phy, reg)
627 	struct device *self;
628 	int phy, reg;
629 {
630 	struct dc_mii_frame frame;
631 	struct dc_softc *sc = (struct dc_softc *)self;
632 	int i, rval, phy_reg;
633 
634 	/*
635 	 * Note: both the AL981 and AN983 have internal PHYs,
636 	 * however the AL981 provides direct access to the PHY
637 	 * registers while the AN983 uses a serial MII interface.
638 	 * The AN983's MII interface is also buggy in that you
639 	 * can read from any MII address (0 to 31), but only address 1
640 	 * behaves normally. To deal with both cases, we pretend
641 	 * that the PHY is at MII address 1.
642 	 */
643 	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
644 		return (0);
645 
646 	/*
647 	 * Note: the ukphy probes of the RS7112 report a PHY at
648 	 * MII address 0 (possibly HomePNA?) and at 1 (ethernet),
649 	 * so we only respond to the correct one.
650 	 */
651 	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
652 		return (0);
653 
654 	if (sc->dc_pmode != DC_PMODE_MII) {
655 		if (phy == (MII_NPHY - 1)) {
656 			switch(reg) {
657 			case MII_BMSR:
658 				/*
659 				 * Fake something to make the probe
660 				 * code think there's a PHY here.
661 				 */
662 				return (BMSR_MEDIAMASK);
663 				break;
664 			case MII_PHYIDR1:
665 				if (DC_IS_PNIC(sc))
666 					return (PCI_VENDOR_LITEON);
667 				return (PCI_VENDOR_DEC);
668 				break;
669 			case MII_PHYIDR2:
670 				if (DC_IS_PNIC(sc))
671 					return (PCI_PRODUCT_LITEON_PNIC);
672 				return (PCI_PRODUCT_DEC_21142);
673 				break;
674 			default:
675 				return (0);
676 				break;
677 			}
678 		} else
679 			return (0);
680 	}
681 
682 	if (DC_IS_PNIC(sc)) {
683 		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
684 		    (phy << 23) | (reg << 18));
685 		for (i = 0; i < DC_TIMEOUT; i++) {
686 			DELAY(1);
687 			rval = CSR_READ_4(sc, DC_PN_MII);
688 			if (!(rval & DC_PN_MII_BUSY)) {
689 				rval &= 0xFFFF;
690 				return (rval == 0xFFFF ? 0 : rval);
691 			}
692 		}
693 		return (0);
694 	}
695 
696 	if (DC_IS_COMET(sc)) {
697 		switch(reg) {
698 		case MII_BMCR:
699 			phy_reg = DC_AL_BMCR;
700 			break;
701 		case MII_BMSR:
702 			phy_reg = DC_AL_BMSR;
703 			break;
704 		case MII_PHYIDR1:
705 			phy_reg = DC_AL_VENID;
706 			break;
707 		case MII_PHYIDR2:
708 			phy_reg = DC_AL_DEVID;
709 			break;
710 		case MII_ANAR:
711 			phy_reg = DC_AL_ANAR;
712 			break;
713 		case MII_ANLPAR:
714 			phy_reg = DC_AL_LPAR;
715 			break;
716 		case MII_ANER:
717 			phy_reg = DC_AL_ANER;
718 			break;
719 		default:
720 			printf("%s: phy_read: bad phy register %x\n",
721 			    sc->sc_dev.dv_xname, reg);
722 			return (0);
723 			break;
724 		}
725 
726 		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;
727 
728 		if (rval == 0xFFFF)
729 			return (0);
730 		return (rval);
731 	}
732 
733 	bzero(&frame, sizeof(frame));
734 
735 	frame.mii_phyaddr = phy;
736 	frame.mii_regaddr = reg;
737 	if (sc->dc_type == DC_TYPE_98713) {
738 		phy_reg = CSR_READ_4(sc, DC_NETCFG);
739 		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
740 	}
741 	dc_mii_readreg(sc, &frame);
742 	if (sc->dc_type == DC_TYPE_98713)
743 		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
744 
745 	return (frame.mii_data);
746 }
747 
748 void
749 dc_miibus_writereg(self, phy, reg, data)
750 	struct device *self;
751 	int phy, reg, data;
752 {
753 	struct dc_softc *sc = (struct dc_softc *)self;
754 	struct dc_mii_frame frame;
755 	int i, phy_reg;
756 
757 	bzero((char *)&frame, sizeof(frame));
758 
759 	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
760 		return;
761 	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
762 		return;
763 
764 	if (DC_IS_PNIC(sc)) {
765 		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
766 		    (phy << 23) | (reg << 10) | data);
767 		for (i = 0; i < DC_TIMEOUT; i++) {
768 			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
769 				break;
770 		}
771 		return;
772 	}
773 
774 	if (DC_IS_COMET(sc)) {
775 		switch(reg) {
776 		case MII_BMCR:
777 			phy_reg = DC_AL_BMCR;
778 			break;
779 		case MII_BMSR:
780 			phy_reg = DC_AL_BMSR;
781 			break;
782 		case MII_PHYIDR1:
783 			phy_reg = DC_AL_VENID;
784 			break;
785 		case MII_PHYIDR2:
786 			phy_reg = DC_AL_DEVID;
787 			break;
788 		case MII_ANAR:
789 			phy_reg = DC_AL_ANAR;
790 			break;
791 		case MII_ANLPAR:
792 			phy_reg = DC_AL_LPAR;
793 			break;
794 		case MII_ANER:
795 			phy_reg = DC_AL_ANER;
796 			break;
797 		default:
798 			printf("%s: phy_write: bad phy register %x\n",
799 			    sc->sc_dev.dv_xname, reg);
800 			return;
801 			break;
802 		}
803 
804 		CSR_WRITE_4(sc, phy_reg, data);
805 		return;
806 	}
807 
808 	frame.mii_phyaddr = phy;
809 	frame.mii_regaddr = reg;
810 	frame.mii_data = data;
811 
812 	if (sc->dc_type == DC_TYPE_98713) {
813 		phy_reg = CSR_READ_4(sc, DC_NETCFG);
814 		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
815 	}
816 	dc_mii_writereg(sc, &frame);
817 	if (sc->dc_type == DC_TYPE_98713)
818 		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
819 }
820 
821 void
822 dc_miibus_statchg(self)
823 	struct device *self;
824 {
825 	struct dc_softc *sc = (struct dc_softc *)self;
826 	struct mii_data *mii;
827 	struct ifmedia *ifm;
828 
829 	if (DC_IS_ADMTEK(sc))
830 		return;
831 
832 	mii = &sc->sc_mii;
833 	ifm = &mii->mii_media;
834 	if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
835 		dc_setcfg(sc, ifm->ifm_media);
836 		sc->dc_if_media = ifm->ifm_media;
837 	} else {
838 		dc_setcfg(sc, mii->mii_media_active);
839 		sc->dc_if_media = mii->mii_media_active;
840 	}
841 }
842 
843 #define DC_POLY		0xEDB88320
844 #define DC_BITS_512	9
845 #define DC_BITS_128	7
846 #define DC_BITS_64	6
847 
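/*
 * Multicast hash helpers. dc_crc_le() runs a little-endian CRC-32
 * (polynomial 0xEDB88320) over the 6-byte address and keeps only as
 * many low-order bits as the chip's hash table has entries: 9 bits for
 * the usual 512-bit table, 7 or 6 for the narrower Macronix/PNIC II
 * variants, with a special mapping for the Xircom parts. The result is
 * the bit position to set in the setup frame or MAR registers.
 */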
848 u_int32_t
849 dc_crc_le(sc, addr)
850 	struct dc_softc *sc;
851 	caddr_t addr;
852 {
853 	u_int32_t idx, bit, data, crc;
854 
855 	/* Compute CRC for the address value. */
856 	crc = 0xFFFFFFFF; /* initial value */
857 
858 	for (idx = 0; idx < 6; idx++) {
859 		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
860 			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0);
861 	}
862 
863 	/*
864 	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
865 	 * chips is only 128 bits wide.
866 	 */
867 	if (sc->dc_flags & DC_128BIT_HASH)
868 		return (crc & ((1 << DC_BITS_128) - 1));
869 
870 	/* The hash table on the MX98715BEC is only 64 bits wide. */
871 	if (sc->dc_flags & DC_64BIT_HASH)
872 		return (crc & ((1 << DC_BITS_64) - 1));
873 
874 	/* Xircom's hash filtering table is different (read: weird) */
875 	/* Xircom uses the LEAST significant bits */
876 	if (DC_IS_XIRCOM(sc)) {
877 		if ((crc & 0x180) == 0x180)
878 			return (crc & 0x0F) + (crc & 0x70)*3 + (14 << 4);
879 		else
880 			return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4);
881 	}
882 
883 	return (crc & ((1 << DC_BITS_512) - 1));
884 }
885 
886 /*
887  * Calculate CRC of a multicast group address, return the upper 6 bits.
888  */
889 u_int32_t
890 dc_crc_be(addr)
891 	caddr_t addr;
892 {
893 	u_int32_t crc, carry;
894 	int i, j;
895 	u_int8_t c;
896 
897 	/* Compute CRC for the address value. */
898 	crc = 0xFFFFFFFF; /* initial value */
899 
900 	for (i = 0; i < 6; i++) {
901 		c = *(addr + i);
902 		for (j = 0; j < 8; j++) {
903 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
904 			crc <<= 1;
905 			c >>= 1;
906 			if (carry)
907 				crc = (crc ^ 0x04c11db6) | carry;
908 		}
909 	}
910 
911 	/* return the filter bit position */
912 	return ((crc >> 26) & 0x0000003F);
913 }
914 
915 /*
916  * 21143-style RX filter setup routine. Filter programming is done by
917  * downloading a special setup frame into the TX engine. 21143, Macronix,
918  * PNIC, PNIC II and Davicom chips are programmed this way.
919  *
920  * We always program the chip using 'hash perfect' mode, i.e. one perfect
921  * address (our node address) and a 512-bit hash filter for multicast
922  * frames. We also sneak the broadcast address into the hash filter since
923  * we need that too.
924  */
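/*
 * Setup frame layout as used below (DC_SFRAME_LEN, 192 bytes on this
 * family): the frame is treated as an array of 32-bit words of which
 * only the low 16 bits are significant. In hash-perfect mode words
 * 0-31 hold the 512-bit multicast hash table (bit 'h' lives at
 * sp[h >> 4], mask 1 << (h & 0xF)) and words 39-41 hold the station's
 * perfect-match address.
 */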
925 void
926 dc_setfilt_21143(sc)
927 	struct dc_softc *sc;
928 {
929 	struct dc_desc *sframe;
930 	u_int32_t h, *sp;
931 	struct arpcom *ac = &sc->sc_arpcom;
932 	struct ether_multi *enm;
933 	struct ether_multistep step;
934 	struct ifnet *ifp;
935 	int i;
936 
937 	ifp = &sc->sc_arpcom.ac_if;
938 
939 	i = sc->dc_cdata.dc_tx_prod;
940 	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
941 	sc->dc_cdata.dc_tx_cnt++;
942 	sframe = &sc->dc_ldata->dc_tx_list[i];
943 	sp = &sc->dc_ldata->dc_sbuf[0];
944 	bzero((char *)sp, DC_SFRAME_LEN);
945 
946 	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
947 	    offsetof(struct dc_list_data, dc_sbuf));
948 	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
949 	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);
950 
951 	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
952 	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];
953 
954 	/* If we want promiscuous mode, set the allframes bit. */
955 	if (ifp->if_flags & IFF_PROMISC)
956 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
957 	else
958 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
959 
960 	if (ifp->if_flags & IFF_ALLMULTI)
961 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
962 	else
963 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
964 
965 	ETHER_FIRST_MULTI(step, ac, enm);
966 	while (enm != NULL) {
967 		h = dc_crc_le(sc, enm->enm_addrlo);
968 		sp[h >> 4] |= htole32(1 << (h & 0xF));
969 		ETHER_NEXT_MULTI(step, enm);
970 	}
971 
972 	if (ifp->if_flags & IFF_BROADCAST) {
973 		h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
974 		sp[h >> 4] |= htole32(1 << (h & 0xF));
975 	}
976 
977 	/* Set our MAC address */
978 	sp[39] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
979 	sp[40] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
980 	sp[41] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);
981 
982 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
983 	    offsetof(struct dc_list_data, dc_sbuf[0]),
984 	    sizeof(struct dc_list_data) -
985 	    offsetof(struct dc_list_data, dc_sbuf[0]),
986 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
987 
988 	sframe->dc_status = htole32(DC_TXSTAT_OWN);
989 
990 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
991 	    offsetof(struct dc_list_data, dc_tx_list[i]),
992 	    sizeof(struct dc_desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
993 
994 	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
995 
996 	/*
997 	 * The PNIC takes an exceedingly long time to process its
998 	 * setup frame; wait 10ms after posting the setup frame
999 	 * before proceeding, just so it has time to swallow its
1000 	 * medicine.
1001 	 */
1002 	DELAY(10000);
1003 
1004 	ifp->if_timer = 5;
1005 }
1006 
1007 void
1008 dc_setfilt_admtek(sc)
1009 	struct dc_softc *sc;
1010 {
1011 	struct ifnet *ifp;
1012 	struct arpcom *ac = &sc->sc_arpcom;
1013 	struct ether_multi *enm;
1014 	struct ether_multistep step;
1015 	int h = 0;
1016 	u_int32_t hashes[2] = { 0, 0 };
1017 
1018 	ifp = &sc->sc_arpcom.ac_if;
1019 
1020 	/* Init our MAC address */
1021 	CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]));
1022 	CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]));
1023 
1024 	/* If we want promiscuous mode, set the allframes bit. */
1025 	if (ifp->if_flags & IFF_PROMISC)
1026 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1027 	else
1028 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1029 
1030 	if (ifp->if_flags & IFF_ALLMULTI)
1031 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1032 	else
1033 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1034 
1035 	/* first, zot all the existing hash bits */
1036 	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
1037 	CSR_WRITE_4(sc, DC_AL_MAR1, 0);
1038 
1039 	/*
1040 	 * If we're already in promisc or allmulti mode, we
1041 	 * don't have to bother programming the multicast filter.
1042 	 */
1043 	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
1044 		return;
1045 
1046 	/* now program new ones */
1047 	ETHER_FIRST_MULTI(step, ac, enm);
1048 	while (enm != NULL) {
1049 		if (DC_IS_CENTAUR(sc))
1050 			h = dc_crc_le(sc, enm->enm_addrlo);
1051 		else
1052 			h = dc_crc_be(enm->enm_addrlo);
1053 		if (h < 32)
1054 			hashes[0] |= (1 << h);
1055 		else
1056 			hashes[1] |= (1 << (h - 32));
1057 		ETHER_NEXT_MULTI(step, enm);
1058 	}
1059 
1060 	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
1061 	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
1062 }
1063 
1064 void
1065 dc_setfilt_asix(sc)
1066 	struct dc_softc *sc;
1067 {
1068 	struct ifnet *ifp;
1069 	struct arpcom *ac = &sc->sc_arpcom;
1070 	struct ether_multi *enm;
1071 	struct ether_multistep step;
1072 	int h = 0;
1073 	u_int32_t hashes[2] = { 0, 0 };
1074 
1075 	ifp = &sc->sc_arpcom.ac_if;
1076 
1077 	/* Init our MAC address */
1078 	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
1079 	CSR_WRITE_4(sc, DC_AX_FILTDATA,
1080 	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]));
1081 	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
1082 	CSR_WRITE_4(sc, DC_AX_FILTDATA,
1083 	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]));
1084 
1085 	/* If we want promiscuous mode, set the allframes bit. */
1086 	if (ifp->if_flags & IFF_PROMISC)
1087 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1088 	else
1089 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1090 
1091 	if (ifp->if_flags & IFF_ALLMULTI)
1092 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1093 	else
1094 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1095 
1096 	/*
1097 	 * The ASIX chip has a special bit to enable reception
1098 	 * of broadcast frames.
1099 	 */
1100 	if (ifp->if_flags & IFF_BROADCAST)
1101 		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
1102 	else
1103 		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
1104 
1105 	/* first, zot all the existing hash bits */
1106 	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
1107 	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
1108 	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
1109 	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
1110 
1111 	/*
1112 	 * If we're already in promisc or allmulti mode, we
1113 	 * don't have to bother programming the multicast filter.
1114 	 */
1115 	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
1116 		return;
1117 
1118 	/* now program new ones */
1119 	ETHER_FIRST_MULTI(step, ac, enm);
1120 	while (enm != NULL) {
1121 		h = dc_crc_be(enm->enm_addrlo);
1122 		if (h < 32)
1123 			hashes[0] |= (1 << h);
1124 		else
1125 			hashes[1] |= (1 << (h - 32));
1126 		ETHER_NEXT_MULTI(step, enm);
1127 	}
1128 
1129 	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
1130 	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
1131 	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
1132 	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
1133 }
1134 
1135 void
1136 dc_setfilt_xircom(sc)
1137 	struct dc_softc *sc;
1138 {
1139 	struct dc_desc *sframe;
1140 	struct arpcom *ac = &sc->sc_arpcom;
1141 	struct ether_multi *enm;
1142 	struct ether_multistep step;
1143 	u_int32_t h, *sp;
1144 	struct ifnet *ifp;
1145 	int i;
1146 
1147 	ifp = &sc->sc_arpcom.ac_if;
1148 	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));
1149 
1150 	i = sc->dc_cdata.dc_tx_prod;
1151 	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
1152 	sc->dc_cdata.dc_tx_cnt++;
1153 	sframe = &sc->dc_ldata->dc_tx_list[i];
1154 	sp = &sc->dc_ldata->dc_sbuf[0];
1155 	bzero((char *)sp, DC_SFRAME_LEN);
1156 
1157 	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1158 	    offsetof(struct dc_list_data, dc_sbuf));
1159 	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
1160 	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);
1161 
1162 	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
1163 	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];
1164 
1165 	/* If we want promiscuous mode, set the allframes bit. */
1166 	if (ifp->if_flags & IFF_PROMISC)
1167 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1168 	else
1169 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1170 
1171 	if (ifp->if_flags & IFF_ALLMULTI)
1172 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1173 	else
1174 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1175 
1176 	/* now program new ones */
1177 	ETHER_FIRST_MULTI(step, ac, enm);
1178 	while (enm != NULL) {
1179 		h = dc_crc_le(sc, enm->enm_addrlo);
1180 		sp[h >> 4] |= htole32(1 << (h & 0xF));
1181 		ETHER_NEXT_MULTI(step, enm);
1182 	}
1183 
1184 	if (ifp->if_flags & IFF_BROADCAST) {
1185 		h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
1186 		sp[h >> 4] |= htole32(1 << (h & 0xF));
1187 	}
1188 
1189 	/* Set our MAC address */
1190 	sp[0] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
1191 	sp[1] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
1192 	sp[2] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);
1193 
1194 	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
1195 	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
1196 	ifp->if_flags |= IFF_RUNNING;
1197 	sframe->dc_status = htole32(DC_TXSTAT_OWN);
1198 	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
1199 
1200 	/*
1201 	 * Give the chip some time to process the setup frame.
1202 	 */
1203 	DELAY(1000);
1204 
1205 	ifp->if_timer = 5;
1206 }
1207 
1208 void
1209 dc_setfilt(sc)
1210 	struct dc_softc *sc;
1211 {
1212 	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
1213 	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
1214 		dc_setfilt_21143(sc);
1215 
1216 	if (DC_IS_ASIX(sc))
1217 		dc_setfilt_asix(sc);
1218 
1219 	if (DC_IS_ADMTEK(sc))
1220 		dc_setfilt_admtek(sc);
1221 
1222 	if (DC_IS_XIRCOM(sc))
1223 		dc_setfilt_xircom(sc);
1224 }
1225 
1226 /*
1227  * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
1228  * netconfig register, we first have to put the transmit and/or receive
1229  * logic in the idle state.
1230  */
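/*
 * The sequence is: clear the TX_ON/RX_ON bits, poll the ISR until the
 * transmitter reports idle and the receiver reports stopped, reprogram
 * the media bits, then turn the engines back on if they were running
 * when we started.
 */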
1231 void
1232 dc_setcfg(sc, media)
1233 	struct dc_softc *sc;
1234 	int media;
1235 {
1236 	int i, restart = 0;
1237 	u_int32_t isr;
1238 
1239 	if (IFM_SUBTYPE(media) == IFM_NONE)
1240 		return;
1241 
1242 	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
1243 		restart = 1;
1244 		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));
1245 
1246 		for (i = 0; i < DC_TIMEOUT; i++) {
1247 			DELAY(10);
1248 			isr = CSR_READ_4(sc, DC_ISR);
1249 			if (isr & DC_ISR_TX_IDLE ||
1250 			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED)
1251 				break;
1252 		}
1253 
1254 		if (i == DC_TIMEOUT)
1255 			printf("%s: failed to force tx and "
1256 			    "rx to idle state\n", sc->sc_dev.dv_xname);
1257 
1258 	}
1259 
1260 	if (IFM_SUBTYPE(media) == IFM_100_TX) {
1261 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
1262 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
1263 		if (sc->dc_pmode == DC_PMODE_MII) {
1264 			int watchdogreg;
1265 
1266 			if (DC_IS_INTEL(sc)) {
1267 			/* there's a write enable bit here that reads as 1 */
1268 				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
1269 				watchdogreg &= ~DC_WDOG_CTLWREN;
1270 				watchdogreg |= DC_WDOG_JABBERDIS;
1271 				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
1272 			} else {
1273 				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
1274 			}
1275 			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
1276 			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
1277 			if (sc->dc_type == DC_TYPE_98713)
1278 				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
1279 				    DC_NETCFG_SCRAMBLER));
1280 			if (!DC_IS_DAVICOM(sc))
1281 				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
1282 			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
1283 			if (DC_IS_INTEL(sc))
1284 				dc_apply_fixup(sc, IFM_AUTO);
1285 		} else {
1286 			if (DC_IS_PNIC(sc)) {
1287 				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
1288 				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
1289 				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
1290 			}
1291 			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
1292 			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
1293 			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
1294 			if (DC_IS_INTEL(sc))
1295 				dc_apply_fixup(sc,
1296 				    (media & IFM_GMASK) == IFM_FDX ?
1297 				    IFM_100_TX|IFM_FDX : IFM_100_TX);
1298 		}
1299 	}
1300 
1301 	if (IFM_SUBTYPE(media) == IFM_10_T) {
1302 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
1303 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
1304 		if (sc->dc_pmode == DC_PMODE_MII) {
1305 			int watchdogreg;
1306 
1307 			if (DC_IS_INTEL(sc)) {
1308 			/* there's a write enable bit here that reads as 1 */
1309 				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
1310 				watchdogreg &= ~DC_WDOG_CTLWREN;
1311 				watchdogreg |= DC_WDOG_JABBERDIS;
1312 				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
1313 			} else {
1314 				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
1315 			}
1316 			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
1317 			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
1318 			if (sc->dc_type == DC_TYPE_98713)
1319 				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
1320 			if (!DC_IS_DAVICOM(sc))
1321 				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
1322 			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
1323 			if (DC_IS_INTEL(sc))
1324 				dc_apply_fixup(sc, IFM_AUTO);
1325 		} else {
1326 			if (DC_IS_PNIC(sc)) {
1327 				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
1328 				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
1329 				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
1330 			}
1331 			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
1332 			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
1333 			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
1334 			if (DC_IS_INTEL(sc)) {
1335 				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
1336 				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
1337 				if ((media & IFM_GMASK) == IFM_FDX)
1338 					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
1339 				else
1340 					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
1341 				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
1342 				DC_CLRBIT(sc, DC_10BTCTRL,
1343 				    DC_TCTL_AUTONEGENBL);
1344 				dc_apply_fixup(sc,
1345 				    (media & IFM_GMASK) == IFM_FDX ?
1346 				    IFM_10_T|IFM_FDX : IFM_10_T);
1347 				DELAY(20000);
1348 			}
1349 		}
1350 	}
1351 
1352 	/*
1353 	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
1354 	 * PHY and we want HomePNA mode, set the portsel bit to turn
1355 	 * on the external MII port.
1356 	 */
1357 	if (DC_IS_DAVICOM(sc)) {
1358 		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
1359 			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
1360 			sc->dc_link = 1;
1361 		} else {
1362 			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
1363 		}
1364 	}
1365 
1366 	if ((media & IFM_GMASK) == IFM_FDX) {
1367 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
1368 		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
1369 			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
1370 	} else {
1371 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
1372 		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
1373 			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
1374 	}
1375 
1376 	if (restart)
1377 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);
1378 }
1379 
1380 void
1381 dc_reset(sc)
1382 	struct dc_softc *sc;
1383 {
1384 	int i;
1385 
1386 	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
1387 
1388 	for (i = 0; i < DC_TIMEOUT; i++) {
1389 		DELAY(10);
1390 		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
1391 			break;
1392 	}
1393 
1394 	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
1395 	    DC_IS_INTEL(sc) || DC_IS_CONEXANT(sc)) {
1396 		DELAY(10000);
1397 		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
1398 		i = 0;
1399 	}
1400 
1401 	if (i == DC_TIMEOUT)
1402 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
1403 
1404 	/* Wait a little while for the chip to get its brains in order. */
1405 	DELAY(1000);
1406 
1407 	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
1408 	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
1409 	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);
1410 
1411 	/*
1412 	 * Bring the SIA out of reset. In some cases, it looks
1413 	 * like failing to unreset the SIA soon enough gets it
1414 	 * into a state where it will never come out of reset
1415 	 * until we reset the whole chip again.
1416 	 */
1417 	if (DC_IS_INTEL(sc)) {
1418 		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
1419 		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
1420 		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
1421 	}
1422 
1423 	if (sc->dc_type == DC_TYPE_21145)
1424 		dc_setcfg(sc, IFM_10_T);
1425 }
1426 
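/*
 * Replay the GPIO sequences from the SROM media block matching the
 * requested media: first the reset sequence, then the general-purpose
 * control sequence. Each 16-bit value is written into the upper half
 * of CSR15 (the watchdog register), where the 21143 exposes its SIA
 * general-purpose port.
 */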
1427 void
1428 dc_apply_fixup(sc, media)
1429 	struct dc_softc *sc;
1430 	int media;
1431 {
1432 	struct dc_mediainfo *m;
1433 	u_int8_t *p;
1434 	int i;
1435 	u_int32_t reg;
1436 
1437 	m = sc->dc_mi;
1438 
1439 	while (m != NULL) {
1440 		if (m->dc_media == media)
1441 			break;
1442 		m = m->dc_next;
1443 	}
1444 
1445 	if (m == NULL)
1446 		return;
1447 
1448 	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
1449 		reg = (p[0] | (p[1] << 8)) << 16;
1450 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1451 	}
1452 
1453 	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
1454 		reg = (p[0] | (p[1] << 8)) << 16;
1455 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1456 	}
1457 }
1458 
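/*
 * The dc_decode_leaf_*() routines turn 21143 SROM media blocks into
 * dc_mediainfo records: each notes the ifmedia word the block maps to
 * and keeps a pointer into the SROM image for the GPIO control (and,
 * for MII blocks, reset) sequences that dc_apply_fixup() replays later.
 * New records are pushed onto the head of the sc->dc_mi list.
 */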
1459 void
1460 dc_decode_leaf_sia(sc, l)
1461 	struct dc_softc *sc;
1462 	struct dc_eblock_sia *l;
1463 {
1464 	struct dc_mediainfo *m;
1465 
1466 	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
1467 	if (m == NULL)
1468 		return;
1469 	bzero(m, sizeof(struct dc_mediainfo));
1470 	if (l->dc_sia_code == DC_SIA_CODE_10BT)
1471 		m->dc_media = IFM_10_T;
1472 
1473 	if (l->dc_sia_code == DC_SIA_CODE_10BT_FDX)
1474 		m->dc_media = IFM_10_T|IFM_FDX;
1475 
1476 	if (l->dc_sia_code == DC_SIA_CODE_10B2)
1477 		m->dc_media = IFM_10_2;
1478 
1479 	if (l->dc_sia_code == DC_SIA_CODE_10B5)
1480 		m->dc_media = IFM_10_5;
1481 
1482 	m->dc_gp_len = 2;
1483 	m->dc_gp_ptr = (u_int8_t *)&l->dc_sia_gpio_ctl;
1484 
1485 	m->dc_next = sc->dc_mi;
1486 	sc->dc_mi = m;
1487 
1488 	sc->dc_pmode = DC_PMODE_SIA;
1489 }
1490 
1491 void
1492 dc_decode_leaf_sym(sc, l)
1493 	struct dc_softc *sc;
1494 	struct dc_eblock_sym *l;
1495 {
1496 	struct dc_mediainfo *m;
1497 
1498 	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
1499 	if (m == NULL)
1500 		return;
1501 	bzero(m, sizeof(struct dc_mediainfo));
1502 	if (l->dc_sym_code == DC_SYM_CODE_100BT)
1503 		m->dc_media = IFM_100_TX;
1504 
1505 	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
1506 		m->dc_media = IFM_100_TX|IFM_FDX;
1507 
1508 	m->dc_gp_len = 2;
1509 	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;
1510 
1511 	m->dc_next = sc->dc_mi;
1512 	sc->dc_mi = m;
1513 
1514 	sc->dc_pmode = DC_PMODE_SYM;
1515 }
1516 
1517 void
1518 dc_decode_leaf_mii(sc, l)
1519 	struct dc_softc *sc;
1520 	struct dc_eblock_mii *l;
1521 {
1522 	u_int8_t *p;
1523 	struct dc_mediainfo *m;
1524 
1525 	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
1526 	if (m == NULL)
1527 		return;
1528 	bzero(m, sizeof(struct dc_mediainfo));
1529 	/* We abuse IFM_AUTO to represent MII. */
1530 	m->dc_media = IFM_AUTO;
1531 	m->dc_gp_len = l->dc_gpr_len;
1532 
1533 	p = (u_int8_t *)l;
1534 	p += sizeof(struct dc_eblock_mii);
1535 	m->dc_gp_ptr = p;
1536 	p += 2 * l->dc_gpr_len;
1537 	m->dc_reset_len = *p;
1538 	p++;
1539 	m->dc_reset_ptr = p;
1540 
1541 	m->dc_next = sc->dc_mi;
1542 	sc->dc_mi = m;
1543 }
1544 
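/*
 * Pull in the whole SROM image. 'bits' is normally the EEPROM address
 * width found by dc_eeprom_width(), so the part holds (1 << bits)
 * 16-bit words, i.e. (2 << bits) bytes.
 */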
1545 void
1546 dc_read_srom(sc, bits)
1547 	struct dc_softc *sc;
1548 	int bits;
1549 {
1550 	int size;
1551 
1552 	size = 2 << bits;
1553 	sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT);
1554 	if (sc->dc_srom == NULL)
1555 		return;
1556 	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
1557 }
1558 
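/*
 * Walk the 21143 SROM info leaf. Byte 27 of the SROM (as read here)
 * gives the offset of the leaf for the first port; the leaf header is
 * followed by dc_mcnt media blocks, each carrying a type byte that
 * selects MII, SIA or SYM decoding and a length byte (low 7 bits) used
 * to step to the next block.
 */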
1559 void
1560 dc_parse_21143_srom(sc)
1561 	struct dc_softc *sc;
1562 {
1563 	struct dc_leaf_hdr *lhdr;
1564 	struct dc_eblock_hdr *hdr;
1565 	int i, loff;
1566 	char *ptr;
1567 
1568 	loff = sc->dc_srom[27];
1569 	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);
1570 
1571 	ptr = (char *)lhdr;
1572 	ptr += sizeof(struct dc_leaf_hdr) - 1;
1573 	for (i = 0; i < lhdr->dc_mcnt; i++) {
1574 		hdr = (struct dc_eblock_hdr *)ptr;
1575 		switch(hdr->dc_type) {
1576 		case DC_EBLOCK_MII:
1577 			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
1578 			break;
1579 		case DC_EBLOCK_SIA:
1580 			dc_decode_leaf_sia(sc, (struct dc_eblock_sia *)hdr);
1581 			break;
1582 		case DC_EBLOCK_SYM:
1583 			dc_decode_leaf_sym(sc, (struct dc_eblock_sym *)hdr);
1584 			break;
1585 		default:
1586 			/* Don't care. Yet. */
1587 			break;
1588 		}
1589 		ptr += (hdr->dc_len & 0x7F);
1590 		ptr++;
1591 	}
1592 }
1593 
1594 /*
1595  * Attach the interface. Allocate softc structures, do ifmedia
1596  * setup and ethernet/BPF attach.
1597  */
1598 void
1599 dc_attach(sc)
1600 	struct dc_softc *sc;
1601 {
1602 	struct ifnet *ifp;
1603 	int mac_offset, tmp, i;
1604 
1605 	/*
1606 	 * Get station address from the EEPROM.
1607 	 */
1608 	if (sc->sc_hasmac)
1609 		goto hasmac;
1610 
1611 	switch(sc->dc_type) {
1612 	case DC_TYPE_98713:
1613 	case DC_TYPE_98713A:
1614 	case DC_TYPE_987x5:
1615 	case DC_TYPE_PNICII:
1616 		dc_read_eeprom(sc, (caddr_t)&mac_offset,
1617 		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
1618 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1619 		    (mac_offset / 2), 3, 0);
1620 		break;
1621 	case DC_TYPE_PNIC:
1622 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 0, 3, 1);
1623 		break;
1624 	case DC_TYPE_DM9102:
1625 	case DC_TYPE_21143:
1626 	case DC_TYPE_21145:
1627 	case DC_TYPE_ASIX:
1628 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1629 		    DC_EE_NODEADDR, 3, 0);
1630 		break;
1631 	case DC_TYPE_AL981:
1632 	case DC_TYPE_AN983:
1633 		bcopy(&sc->dc_srom[DC_AL_EE_NODEADDR], &sc->sc_arpcom.ac_enaddr,
1634 		    ETHER_ADDR_LEN);
1635 		break;
1636 	case DC_TYPE_XIRCOM:
1637 		break;
1638 	case DC_TYPE_CONEXANT:
1639 		bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR,
1640 		    &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
1641 		break;
1642 	default:
1643 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1644 		    DC_EE_NODEADDR, 3, 0);
1645 		break;
1646 	}
1647 hasmac:
1648 
1649 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct dc_list_data),
1650 	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1651 	    BUS_DMA_NOWAIT) != 0) {
1652 		printf(": can't alloc list mem\n");
1653 		goto fail;
1654 	}
1655 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1656 	    sizeof(struct dc_list_data), &sc->sc_listkva,
1657 	    BUS_DMA_NOWAIT) != 0) {
1658 		printf(": can't map list mem\n");
1659 		goto fail;
1660 	}
1661 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct dc_list_data), 1,
1662 	    sizeof(struct dc_list_data), 0, BUS_DMA_NOWAIT,
1663 	    &sc->sc_listmap) != 0) {
1664 		printf(": can't alloc list map\n");
1665 		goto fail;
1666 	}
1667 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1668 	    sizeof(struct dc_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1669 		printf(": can't load list map\n");
1670 		goto fail;
1671 	}
1672 	sc->dc_ldata = (struct dc_list_data *)sc->sc_listkva;
1673 	bzero(sc->dc_ldata, sizeof(struct dc_list_data));
1674 
1675 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1676 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
1677 		    0, BUS_DMA_NOWAIT,
1678 		    &sc->dc_cdata.dc_rx_chain[i].sd_map) != 0) {
1679 			printf(": can't create rx map\n");
1680 			return;
1681 		}
1682 	}
1683 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1684 	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
1685 		printf(": can't create rx spare map\n");
1686 		return;
1687 	}
1688 
1689 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1690 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1691 		    DC_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
1692 		    &sc->dc_cdata.dc_tx_chain[i].sd_map) != 0) {
1693 			printf(": can't create tx map\n");
1694 			return;
1695 		}
1696 	}
1697 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, DC_TX_LIST_CNT - 5,
1698 	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1699 		printf(": can't create tx spare map\n");
1700 		return;
1701 	}
1702 
1703 	/*
1704 	 * A 21143 or clone chip was detected. Inform the world.
1705 	 */
1706 	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
1707 
1708 	ifp = &sc->sc_arpcom.ac_if;
1709 	ifp->if_softc = sc;
1710 	ifp->if_mtu = ETHERMTU;
1711 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1712 	ifp->if_ioctl = dc_ioctl;
1713 	ifp->if_output = ether_output;
1714 	ifp->if_start = dc_start;
1715 	ifp->if_watchdog = dc_watchdog;
1716 	ifp->if_baudrate = 10000000;
1717 	IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1);
1718 	IFQ_SET_READY(&ifp->if_snd);
1719 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1720 
1721 #if NVLAN > 0
1722 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1723 #endif
1724 
1725 	/* Do MII setup. If this is a 21143, check for a PHY on the
1726 	 * MII bus after applying any necessary fixups to twiddle the
1727 	 * GPIO bits. If we don't end up finding a PHY, restore the
1728 	 * old selection (SIA only or SIA/SYM) and attach the dcphy
1729 	 * driver instead.
1730 	 */
1731 	if (DC_IS_INTEL(sc)) {
1732 		dc_apply_fixup(sc, IFM_AUTO);
1733 		tmp = sc->dc_pmode;
1734 		sc->dc_pmode = DC_PMODE_MII;
1735 	}
1736 
1737 	sc->sc_mii.mii_ifp = ifp;
1738 	sc->sc_mii.mii_readreg = dc_miibus_readreg;
1739 	sc->sc_mii.mii_writereg = dc_miibus_writereg;
1740 	sc->sc_mii.mii_statchg = dc_miibus_statchg;
1741 	ifmedia_init(&sc->sc_mii.mii_media, 0, dc_ifmedia_upd, dc_ifmedia_sts);
1742 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1743 	    MII_OFFSET_ANY, 0);
1744 
1745 	if (DC_IS_INTEL(sc)) {
1746 		if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1747 			sc->dc_pmode = tmp;
1748 			if (sc->dc_pmode != DC_PMODE_SIA)
1749 				sc->dc_pmode = DC_PMODE_SYM;
1750 			sc->dc_flags |= DC_21143_NWAY;
1751 			if (sc->dc_flags & DC_MOMENCO_BOTCH)
1752 				sc->dc_pmode = DC_PMODE_MII;
1753 			mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff,
1754 			    MII_PHY_ANY, MII_OFFSET_ANY, 0);
1755 		} else {
1756 			/* we have a PHY, so we must clear this bit */
1757 			sc->dc_flags &= ~DC_TULIP_LEDS;
1758 		}
1759 	}
1760 
1761 	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1762 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1763 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1764 		printf("%s: MII without any PHY!\n", sc->sc_dev.dv_xname);
1765 	} else if (sc->dc_type == DC_TYPE_21145) {
1766 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
1767 	} else
1768 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1769 
1770 	if (DC_IS_DAVICOM(sc) && sc->dc_revision >= DC_REVISION_DM9102A)
1771 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_HPNA_1,0,NULL);
1772 
1773 	if (DC_IS_XIRCOM(sc)) {
1774 		/*
1775 		 * setup General Purpose Port mode and data so the tulip
1776 		 * can talk to the MII.
1777 		 */
1778 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
1779 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1780 		DELAY(10);
1781 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
1782 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1783 		DELAY(10);
1784 	}
1785 
1786 	/*
1787 	 * Call MI attach routines.
1788 	 */
1789 	if_attach(ifp);
1790 	ether_ifattach(ifp);
1791 
1792 	sc->sc_dhook = shutdownhook_establish(dc_shutdown, sc);
1793 
1794 fail:
1795 	return;
1796 }
1797 
1798 int
1799 dc_detach(sc)
1800 	struct dc_softc *sc;
1801 {
1802 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1803 
1804 	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
1805 		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
1806 
1807 	if (sc->dc_srom)
1808 		free(sc->dc_srom, M_DEVBUF);
1809 
1810 	timeout_del(&sc->dc_tick_tmo);
1811 
1812 	ether_ifdetach(ifp);
1813 	if_detach(ifp);
1814 
1815 	shutdownhook_disestablish(sc->sc_dhook);
1816 
1817 	return (0);
1818 }
1819 
1820 /*
1821  * Initialize the transmit descriptors.
1822  */
1823 int
1824 dc_list_tx_init(sc)
1825 	struct dc_softc *sc;
1826 {
1827 	struct dc_chain_data *cd;
1828 	struct dc_list_data *ld;
1829 	int i;
1830 	bus_addr_t next;
1831 
1832 	cd = &sc->dc_cdata;
1833 	ld = sc->dc_ldata;
1834 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1835 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1836 		if (i == (DC_TX_LIST_CNT - 1))
1837 			next +=
1838 			    offsetof(struct dc_list_data, dc_tx_list[0]);
1839 		else
1840 			next +=
1841 			    offsetof(struct dc_list_data, dc_tx_list[i + 1]);
1842 		cd->dc_tx_chain[i].sd_mbuf = NULL;
1843 		ld->dc_tx_list[i].dc_data = htole32(0);
1844 		ld->dc_tx_list[i].dc_ctl = htole32(0);
1845 		ld->dc_tx_list[i].dc_next = htole32(next);
1846 	}
1847 
1848 	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
1849 
1850 	return (0);
1851 }
1852 
1853 
1854 /*
1855  * Initialize the RX descriptors and allocate mbufs for them. Note that
1856  * we arrange the descriptors in a closed ring, so that the last descriptor
1857  * points back to the first.
1858  */
1859 int
1860 dc_list_rx_init(sc)
1861 	struct dc_softc *sc;
1862 {
1863 	struct dc_chain_data *cd;
1864 	struct dc_list_data *ld;
1865 	int i;
1866 	bus_addr_t next;
1867 
1868 	cd = &sc->dc_cdata;
1869 	ld = sc->dc_ldata;
1870 
1871 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1872 		if (dc_newbuf(sc, i, NULL) == ENOBUFS)
1873 			return (ENOBUFS);
1874 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1875 		if (i == (DC_RX_LIST_CNT - 1))
1876 			next +=
1877 			    offsetof(struct dc_list_data, dc_rx_list[0]);
1878 		else
1879 			next +=
1880 			    offsetof(struct dc_list_data, dc_rx_list[i + 1]);
1881 		ld->dc_rx_list[i].dc_next = htole32(next);
1882 	}
1883 
1884 	cd->dc_rx_prod = 0;
1885 
1886 	return (0);
1887 }
1888 
1889 /*
1890  * Initialize an RX descriptor and attach an MBUF cluster.
1891  */
1892 int
1893 dc_newbuf(sc, i, m)
1894 	struct dc_softc *sc;
1895 	int i;
1896 	struct mbuf *m;
1897 {
1898 	struct mbuf *m_new = NULL;
1899 	struct dc_desc *c;
1900 	bus_dmamap_t map;
1901 
1902 	c = &sc->dc_ldata->dc_rx_list[i];
1903 
1904 	if (m == NULL) {
1905 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1906 		if (m_new == NULL) {
1907 			printf("%s: no memory for rx list "
1908 			    "-- packet dropped!\n", sc->sc_dev.dv_xname);
1909 			return (ENOBUFS);
1910 		}
1911 
1912 		MCLGET(m_new, M_DONTWAIT);
1913 		if (!(m_new->m_flags & M_EXT)) {
1914 			printf("%s: no memory for rx list "
1915 			    "-- packet dropped!\n", sc->sc_dev.dv_xname);
1916 			m_freem(m_new);
1917 			return (ENOBUFS);
1918 		}
1919 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1920 		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
1921 		    mtod(m_new, caddr_t), MCLBYTES, NULL,
1922 		    BUS_DMA_NOWAIT) != 0) {
1923 			printf("%s: rx load failed\n", sc->sc_dev.dv_xname);
1924 			m_freem(m_new);
1925 			return (ENOBUFS);
1926 		}
1927 		map = sc->dc_cdata.dc_rx_chain[i].sd_map;
1928 		sc->dc_cdata.dc_rx_chain[i].sd_map = sc->sc_rx_sparemap;
1929 		sc->sc_rx_sparemap = map;
1930 	} else {
1931 		m_new = m;
1932 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1933 		m_new->m_data = m_new->m_ext.ext_buf;
1934 	}
1935 
1936 	m_adj(m_new, sizeof(u_int64_t));
1937 
1938 	/*
1939 	 * If this is a PNIC chip, zero the buffer. This is part
1940 	 * of the workaround for the receive bug in the 82c168 and
1941 	 * 82c169 chips.
1942 	 */
1943 	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
1944 		bzero(mtod(m_new, char *), m_new->m_len);
1945 
1946 	bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 0,
1947 	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
1948 	    BUS_DMASYNC_PREREAD);
1949 
1950 	sc->dc_cdata.dc_rx_chain[i].sd_mbuf = m_new;
1951 	c->dc_data = htole32(
1952 	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs[0].ds_addr +
1953 	    sizeof(u_int64_t));
1954 	c->dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
1955 	c->dc_status = htole32(DC_RXSTAT_OWN);
1956 
1957 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1958 	    offsetof(struct dc_list_data, dc_rx_list[i]),
1959 	    sizeof(struct dc_desc),
1960 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1961 
1962 	return (0);
1963 }
1964 
1965 /*
1966  * Grrrrr.
1967  * The PNIC chip has a terrible bug in it that manifests itself during
1968  * periods of heavy activity. The exact mode of failure is difficult to
1969  * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
1970  * will happen on slow machines. The bug is that sometimes instead of
1971  * uploading one complete frame during reception, it uploads what looks
1972  * like the entire contents of its FIFO memory. The frame we want is at
1973  * the end of the whole mess, but we never know exactly how much data has
1974  * been uploaded, so salvaging the frame is hard.
1975  *
1976  * There is only one way to do it reliably, and it's disgusting.
1977  * Here's what we know:
1978  *
1979  * - We know there will always be somewhere between one and three extra
1980  *   descriptors uploaded.
1981  *
1982  * - We know the desired received frame will always be at the end of the
1983  *   total data upload.
1984  *
1985  * - We know the size of the desired received frame because it will be
1986  *   provided in the length field of the status word in the last descriptor.
1987  *
1988  * Here's what we do:
1989  *
1990  * - When we allocate buffers for the receive ring, we bzero() them.
1991  *   This means that we know that the buffer contents should be all
1992  *   zeros, except for data uploaded by the chip.
1993  *
1994  * - We also force the PNIC chip to upload frames that include the
1995  *   ethernet CRC at the end.
1996  *
1997  * - We gather all of the bogus frame data into a single buffer.
1998  *
1999  * - We then position a pointer at the end of this buffer and scan
2000  *   backwards until we encounter the first non-zero byte of data.
2001  *   This is the end of the received frame. We know we will encounter
2002  *   some data at the end of the frame because the CRC will always be
2003  *   there, so even if the sender transmits a packet of all zeros,
2004  *   we won't be fooled.
2005  *
2006  * - We know the size of the actual received frame, so we subtract
2007  *   that value from the current pointer location. This brings us
2008  *   to the start of the actual received packet.
2009  *
2010  * - We copy this into an mbuf and pass it on, along with the actual
2011  *   frame length.
2012  *
2013  * The performance hit is tremendous, but it beats dropping frames all
2014  * the time.
2015  */
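
/*
 * Worked example (numbers are illustrative only): if the chip dribbles a
 * short frame out across three DC_RXLEN-sized buffers, the loop in
 * dc_pnic_rx_bug_war() below copies all three buffers into
 * dc_pnic_rx_buf, the backwards scan finds the last non-zero byte (the
 * tail of the CRC), and stepping back total_len bytes from there locates
 * the start of the frame that is copied into the final mbuf.
 */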
2016 
2017 #define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG)
2018 void
2019 dc_pnic_rx_bug_war(sc, idx)
2020 	struct dc_softc *sc;
2021 	int idx;
2022 {
2023 	struct dc_desc		*cur_rx;
2024 	struct dc_desc		*c = NULL;
2025 	struct mbuf		*m = NULL;
2026 	unsigned char		*ptr;
2027 	int			i, total_len;
2028 	u_int32_t		rxstat = 0;
2029 
2030 	i = sc->dc_pnic_rx_bug_save;
2031 	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
2032 	ptr = sc->dc_pnic_rx_buf;
2033 	bzero(ptr, DC_RXLEN * 5);
2034 
2035 	/* Copy all the bytes from the bogus buffers. */
2036 	while (1) {
2037 		c = &sc->dc_ldata->dc_rx_list[i];
2038 		rxstat = letoh32(c->dc_status);
2039 		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
2040 		bcopy(mtod(m, char *), ptr, DC_RXLEN);
2041 		ptr += DC_RXLEN;
2042 		/* If this is the last buffer, break out. */
2043 		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
2044 			break;
2045 		dc_newbuf(sc, i, m);
2046 		DC_INC(i, DC_RX_LIST_CNT);
2047 	}
2048 
2049 	/* Find the length of the actual receive frame. */
2050 	total_len = DC_RXBYTES(rxstat);
2051 
2052 	/* Scan backwards until we hit a non-zero byte. */
2053 	while(*ptr == 0x00)
2054 		ptr--;
2055 
2056 	/* Round off. */
2057 	if ((unsigned long)(ptr) & 0x3)
2058 		ptr -= 1;
2059 
2060 	/* Now find the start of the frame. */
2061 	ptr -= total_len;
2062 	if (ptr < sc->dc_pnic_rx_buf)
2063 		ptr = sc->dc_pnic_rx_buf;
2064 
2065 	/*
2066 	 * Now copy the salvaged frame to the last mbuf and fake up
2067 	 * the status word to make it look like a successful
2068 	 * frame reception.
2069 	 */
2070 	dc_newbuf(sc, i, m);
2071 	bcopy(ptr, mtod(m, char *), total_len);
2072 	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
2073 }
2074 
2075 /*
2076  * This routine searches the RX ring for dirty descriptors in the
2077  * event that the rxeof routine falls out of sync with the chip's
2078  * current descriptor pointer. This may happen sometimes as a result
2079  * of a "no RX buffer available" condition that arises when the chip
2080  * consumes all of the RX buffers before the driver has a chance to
2081  * process the RX ring. This routine may need to be called more than
2082  * once to bring the driver back in sync with the chip; however, we
2083  * should still be getting RX DONE interrupts to drive the search
2084  * for new packets in the RX ring, so we should catch up eventually.
2085  */
2086 int
2087 dc_rx_resync(sc)
2088 	struct dc_softc *sc;
2089 {
2090 	u_int32_t stat;
2091 	int i, pos, offset;
2092 
2093 	pos = sc->dc_cdata.dc_rx_prod;
2094 
2095 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
2096 
2097 		offset = offsetof(struct dc_list_data, dc_rx_list[pos]);
2098 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2099 		    offset, sizeof(struct dc_desc),
2100 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2101 
2102 		stat = sc->dc_ldata->dc_rx_list[pos].dc_status;
2103 		if (!(stat & htole32(DC_RXSTAT_OWN)))
2104 			break;
2105 		DC_INC(pos, DC_RX_LIST_CNT);
2106 	}
2107 
2108 	/* If the ring really is empty, then just return. */
2109 	if (i == DC_RX_LIST_CNT)
2110 		return (0);
2111 
2112 	/* We've fallen behind the chip: catch it. */
2113 	sc->dc_cdata.dc_rx_prod = pos;
2114 
2115 	return (EAGAIN);
2116 }
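
/*
 * Callers are expected to loop on the EAGAIN return until the ring is
 * clean again, e.g. as dc_intr() does below:
 *
 *	while (dc_rx_resync(sc))
 *		dc_rxeof(sc);
 */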
2117 
2118 /*
2119  * A frame has been uploaded: pass the resulting mbuf chain up to
2120  * the higher level protocols.
2121  */
2122 void
2123 dc_rxeof(sc)
2124 	struct dc_softc *sc;
2125 {
2126 	struct mbuf *m;
2127 	struct ifnet *ifp;
2128 	struct dc_desc *cur_rx;
2129 	int i, offset, total_len = 0;
2130 	u_int32_t rxstat;
2131 
2132 	ifp = &sc->sc_arpcom.ac_if;
2133 	i = sc->dc_cdata.dc_rx_prod;
2134 
2135 	for(;;) {
2136 		struct mbuf	*m0 = NULL;
2137 
2138 		offset = offsetof(struct dc_list_data, dc_rx_list[i]);
2139 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2140 		    offset, sizeof(struct dc_desc),
2141 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2142 
2143 		cur_rx = &sc->dc_ldata->dc_rx_list[i];
2144 		rxstat = letoh32(cur_rx->dc_status);
2145 		if (rxstat & DC_RXSTAT_OWN)
2146 			break;
2147 
2148 		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
2149 		total_len = DC_RXBYTES(rxstat);
2150 
2151 		bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map,
2152 		    0, sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
2153 		    BUS_DMASYNC_POSTREAD);
2154 
2155 		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
2156 			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
2157 				if (rxstat & DC_RXSTAT_FIRSTFRAG)
2158 					sc->dc_pnic_rx_bug_save = i;
2159 				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
2160 					DC_INC(i, DC_RX_LIST_CNT);
2161 					continue;
2162 				}
2163 				dc_pnic_rx_bug_war(sc, i);
2164 				rxstat = letoh32(cur_rx->dc_status);
2165 				total_len = DC_RXBYTES(rxstat);
2166 			}
2167 		}
2168 
2169 		sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
2170 
2171 		/*
2172 		 * If an error occurs, update stats, clear the
2173 		 * status word and leave the mbuf cluster in place:
2174 		 * it should simply get re-used next time this descriptor
2175 		 * comes up in the ring.
2176 		 */
2177 		if (rxstat & DC_RXSTAT_RXERR
2178 #if NVLAN > 0
2179 		/*
2180 		 * If VLANs are enabled, allow frames up to 4 bytes
2181 		 * longer than the MTU. This should really check if
2182 		 * the giant packet has a vlan tag.
2183 		 */
2184 		 && ((rxstat & (DC_RXSTAT_GIANT|DC_RXSTAT_LASTFRAG)) == 0
2185 		 && total_len <= ifp->if_mtu + 4)
2186 #endif
2187 		    ) {
2188 			ifp->if_ierrors++;
2189 			if (rxstat & DC_RXSTAT_COLLSEEN)
2190 				ifp->if_collisions++;
2191 			dc_newbuf(sc, i, m);
2192 			if (rxstat & DC_RXSTAT_CRCERR) {
2193 				DC_INC(i, DC_RX_LIST_CNT);
2194 				continue;
2195 			} else {
2196 				dc_init(sc);
2197 				return;
2198 			}
2199 		}
2200 
2201 		/* No errors; receive the packet. */
2202 		total_len -= ETHER_CRC_LEN;
2203 
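		/*
		 * Copy the frame out starting ETHER_ALIGN bytes early and
		 * trim the pad again below, which keeps the payload of
		 * the new mbuf longword aligned.
		 */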
2204 		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
2205 		    total_len + ETHER_ALIGN, 0, ifp, NULL);
2206 		dc_newbuf(sc, i, m);
2207 		DC_INC(i, DC_RX_LIST_CNT);
2208 		if (m0 == NULL) {
2209 			ifp->if_ierrors++;
2210 			continue;
2211 		}
2212 		m_adj(m0, ETHER_ALIGN);
2213 		m = m0;
2214 
2215 		ifp->if_ipackets++;
2216 #if NBPFILTER > 0
2217 		if (ifp->if_bpf)
2218 			bpf_mtap(ifp->if_bpf, m);
2219 #endif
2220 		ether_input_mbuf(ifp, m);
2221 	}
2222 
2223 	sc->dc_cdata.dc_rx_prod = i;
2224 }
2225 
2226 /*
2227  * A frame was downloaded to the chip. It's safe for us to clean up
2228  * the list buffers.
2229  */
2230 
2231 void
2232 dc_txeof(sc)
2233 	struct dc_softc *sc;
2234 {
2235 	struct dc_desc *cur_tx = NULL;
2236 	struct ifnet *ifp;
2237 	int idx, offset;
2238 
2239 	ifp = &sc->sc_arpcom.ac_if;
2240 
2241 	/* Clear the timeout timer. */
2242 	ifp->if_timer = 0;
2243 
2244 	/*
2245 	 * Go through our tx list and free mbufs for those
2246 	 * frames that have been transmitted.
2247 	 */
2248 	idx = sc->dc_cdata.dc_tx_cons;
2249 	while(idx != sc->dc_cdata.dc_tx_prod) {
2250 		u_int32_t		txstat;
2251 
2252 		offset = offsetof(struct dc_list_data, dc_tx_list[idx]);
2253 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2254 		    offset, sizeof(struct dc_desc),
2255 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2256 
2257 		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
2258 		txstat = letoh32(cur_tx->dc_status);
2259 
2260 		if (txstat & DC_TXSTAT_OWN)
2261 			break;
2262 
2263 		if (!(cur_tx->dc_ctl & htole32(DC_TXCTL_LASTFRAG)) ||
2264 		    cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
2265 			sc->dc_cdata.dc_tx_cnt--;
2266 			if (cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
2267 				/*
2268 				 * Yes, the PNIC is so brain damaged
2269 				 * that it will sometimes generate a TX
2270 				 * underrun error while DMAing the RX
2271 				 * filter setup frame. If we detect this,
2272 				 * we have to send the setup frame again,
2273 				 * or else the filter won't be programmed
2274 				 * correctly.
2275 				 */
2276 				if (DC_IS_PNIC(sc)) {
2277 					if (txstat & DC_TXSTAT_ERRSUM)
2278 						dc_setfilt(sc);
2279 				}
2280 				sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
2281 			}
2282 			DC_INC(idx, DC_TX_LIST_CNT);
2283 			continue;
2284 		}
2285 
2286 		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
2287 			/*
2288 			 * XXX: Why does my Xircom taunt me so?
2289 			 * For some reason it likes setting the CARRLOST flag
2290 			 * even when the carrier is there. wtf?!
2291 			 * Who knows, but Conexant chips have the
2292 			 * same problem. Maybe they took lessons
2293 			 * from Xircom.
2294 			 */
2295 			if (/*sc->dc_type == DC_TYPE_21143 &&*/
2296 			    sc->dc_pmode == DC_PMODE_MII &&
2297 			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
2298 			    DC_TXSTAT_NOCARRIER)))
2299 				txstat &= ~DC_TXSTAT_ERRSUM;
2300 		} else {
2301 			if (/*sc->dc_type == DC_TYPE_21143 &&*/
2302 			    sc->dc_pmode == DC_PMODE_MII &&
2303 			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
2304 			    DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST)))
2305 				txstat &= ~DC_TXSTAT_ERRSUM;
2306 		}
2307 
2308 		if (txstat & DC_TXSTAT_ERRSUM) {
2309 			ifp->if_oerrors++;
2310 			if (txstat & DC_TXSTAT_EXCESSCOLL)
2311 				ifp->if_collisions++;
2312 			if (txstat & DC_TXSTAT_LATECOLL)
2313 				ifp->if_collisions++;
2314 			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
2315 				dc_init(sc);
2316 				return;
2317 			}
2318 		}
2319 
2320 		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;
2321 
2322 		ifp->if_opackets++;
2323 		if (sc->dc_cdata.dc_tx_chain[idx].sd_map->dm_nsegs != 0) {
2324 			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map;
2325 
2326 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2327 			    BUS_DMASYNC_POSTWRITE);
2328 			bus_dmamap_unload(sc->sc_dmat, map);
2329 		}
2330 		if (sc->dc_cdata.dc_tx_chain[idx].sd_mbuf != NULL) {
2331 			m_freem(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf);
2332 			sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
2333 		}
2334 
2335 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2336 		    offset, sizeof(struct dc_desc),
2337 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2338 
2339 		sc->dc_cdata.dc_tx_cnt--;
2340 		DC_INC(idx, DC_TX_LIST_CNT);
2341 	}
2342 
2343 	sc->dc_cdata.dc_tx_cons = idx;
2344 	if (cur_tx != NULL)
2345 		ifp->if_flags &= ~IFF_OACTIVE;
2346 }
2347 
2348 void
2349 dc_tick(xsc)
2350 	void *xsc;
2351 {
2352 	struct dc_softc *sc = (struct dc_softc *)xsc;
2353 	struct mii_data *mii;
2354 	struct ifnet *ifp;
2355 	int s;
2356 	u_int32_t r;
2357 
2358 	s = splimp();
2359 
2360 	ifp = &sc->sc_arpcom.ac_if;
2361 	mii = &sc->sc_mii;
2362 
2363 	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
2364 		if (sc->dc_flags & DC_21143_NWAY) {
2365 			r = CSR_READ_4(sc, DC_10BTSTAT);
2366 			if (IFM_SUBTYPE(mii->mii_media_active) ==
2367 			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
2368 				sc->dc_link = 0;
2369 				mii_mediachg(mii);
2370 			}
2371 			if (IFM_SUBTYPE(mii->mii_media_active) ==
2372 			    IFM_10_T && (r & DC_TSTAT_LS10)) {
2373 				sc->dc_link = 0;
2374 				mii_mediachg(mii);
2375 			}
2376 			if (sc->dc_link == 0)
2377 				mii_tick(mii);
2378 		} else {
2379 			r = CSR_READ_4(sc, DC_ISR);
2380 			if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT &&
2381 			    sc->dc_cdata.dc_tx_cnt == 0 && !DC_IS_ASIX(sc))
2382 				mii_tick(mii);
2383 			if (!(mii->mii_media_status & IFM_ACTIVE))
2384 				sc->dc_link = 0;
2385 		}
2386 	} else
2387 		mii_tick(mii);
2388 
2389 	/*
2390 	 * When the init routine completes, we expect to be able to send
2391 	 * packets right away, and in fact the network code will send a
2392 	 * gratuitous ARP the moment the init routine marks the interface
2393 	 * as running. However, even though the MAC may have been initialized,
2394 	 * there may be a delay of a few seconds before the PHY completes
2395 	 * autonegotiation and the link is brought up. Any transmissions
2396 	 * made during that delay will be lost. Dealing with this is tricky:
2397 	 * we can't just pause in the init routine while waiting for the
2398 	 * PHY to come ready since that would bring the whole system to
2399 	 * a screeching halt for several seconds.
2400 	 *
2401 	 * What we do here is prevent the TX start routine from sending
2402 	 * any packets until a link has been established. After the
2403 	 * interface has been initialized, the tick routine will poll
2404 	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
2405 	 * that time, packets will stay in the send queue, and once the
2406 	 * link comes up, they will be flushed out to the wire.
2407 	 */
2408 	if (!sc->dc_link) {
2409 		mii_pollstat(mii);
2410 		if (mii->mii_media_status & IFM_ACTIVE &&
2411 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2412 			sc->dc_link++;
2413 			if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
2414 				dc_start(ifp);
2415 		}
2416 	}
2417 
2418 	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
2419 		timeout_add(&sc->dc_tick_tmo, hz / 10);
2420 	else
2421 		timeout_add(&sc->dc_tick_tmo, hz);
2422 
2423 	splx(s);
2424 }
2425 
2426 int
2427 dc_intr(arg)
2428 	void *arg;
2429 {
2430 	struct dc_softc *sc;
2431 	struct ifnet *ifp;
2432 	u_int32_t status;
2433 	int claimed = 0;
2434 
2435 	sc = arg;
2436 	ifp = &sc->sc_arpcom.ac_if;
2437 
2438 	/* Suppress unwanted interrupts */
2439 	if (!(ifp->if_flags & IFF_UP)) {
2440 		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
2441 			dc_stop(sc);
2442 		return (claimed);
2443 	}
2444 
2445 	/* Disable interrupts. */
2446 	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
2447 
2448 	while(((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) &&
2449 	    status != 0xFFFFFFFF) {
2450 
2451 		claimed = 1;
2452 		CSR_WRITE_4(sc, DC_ISR, status);
2453 
2454 		if (status & DC_ISR_RX_OK) {
2455 			int		curpkts;
2456 			curpkts = ifp->if_ipackets;
2457 			dc_rxeof(sc);
2458 			if (curpkts == ifp->if_ipackets) {
2459 				while(dc_rx_resync(sc))
2460 					dc_rxeof(sc);
2461 			}
2462 		}
2463 
2464 		if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF))
2465 			dc_txeof(sc);
2466 
2467 		if (status & DC_ISR_TX_IDLE) {
2468 			dc_txeof(sc);
2469 			if (sc->dc_cdata.dc_tx_cnt) {
2470 				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2471 				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
2472 			}
2473 		}
2474 
2475 		if (status & DC_ISR_TX_UNDERRUN) {
2476 			u_int32_t		cfg;
2477 
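			/*
			 * Try to recover: Davicom and 21143 parts are
			 * re-initialized, and the TX threshold is either
			 * bumped one step or traded for store-and-forward
			 * once it has reached DC_TXTHRESH_160BYTES.
			 */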
2478 			if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc))
2479 				dc_init(sc);
2480 			cfg = CSR_READ_4(sc, DC_NETCFG);
2481 			cfg &= ~DC_NETCFG_TX_THRESH;
2482 			if (sc->dc_txthresh == DC_TXTHRESH_160BYTES) {
2483 				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2484 			} else if (sc->dc_flags & DC_TX_STORENFWD) {
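				/* Already doing store-and-forward. */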
2485 			} else {
2486 				sc->dc_txthresh += 0x4000;
2487 				CSR_WRITE_4(sc, DC_NETCFG, cfg);
2488 				DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
2489 				DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2490 			}
2491 		}
2492 
2493 		if ((status & DC_ISR_RX_WATDOGTIMEO)
2494 		    || (status & DC_ISR_RX_NOBUF)) {
2495 			int		curpkts;
2496 			curpkts = ifp->if_ipackets;
2497 			dc_rxeof(sc);
2498 			if (curpkts == ifp->if_ipackets) {
2499 				while(dc_rx_resync(sc))
2500 					dc_rxeof(sc);
2501 			}
2502 		}
2503 
2504 		if (status & DC_ISR_BUS_ERR) {
2505 			dc_reset(sc);
2506 			dc_init(sc);
2507 		}
2508 	}
2509 
2510 	/* Re-enable interrupts. */
2511 	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
2512 
2513 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
2514 		dc_start(ifp);
2515 
2516 	return (claimed);
2517 }
2518 
2519 /*
2520  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2521  * pointers to the fragment pointers.
2522  */
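/*
 * Note that dc_encap() refuses the request (ENOBUFS) unless at least
 * five descriptors remain free, and that only the fragments after the
 * first are given the OWN bit inside the loop; the first fragment is
 * handed to the chip at the very end, after the data map has been
 * synced, so the chip never sees a half-built chain.
 */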
2523 int
2524 dc_encap(sc, m_head, txidx)
2525 	struct dc_softc *sc;
2526 	struct mbuf *m_head;
2527 	u_int32_t *txidx;
2528 {
2529 	struct dc_desc *f = NULL;
2530 	int frag, cur, cnt = 0, i;
2531 	bus_dmamap_t map;
2532 
2533 	/*
2534 	 * Start packing the mbufs in this chain into
2535 	 * the fragment pointers. Stop when we run out
2536 	 * of fragments or hit the end of the mbuf chain.
2537 	 */
2538 	map = sc->sc_tx_sparemap;
2539 
2540 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
2541 	    m_head, BUS_DMA_NOWAIT) != 0)
2542 		return (ENOBUFS);
2543 
2544 	cur = frag = *txidx;
2545 
2546 	for (i = 0; i < map->dm_nsegs; i++) {
2547 		if (sc->dc_flags & DC_TX_ADMTEK_WAR) {
2548 			if (*txidx != sc->dc_cdata.dc_tx_prod &&
2549 			    frag == (DC_TX_LIST_CNT - 1)) {
2550 				bus_dmamap_unload(sc->sc_dmat, map);
2551 				return (ENOBUFS);
2552 			}
2553 		}
2554 		if ((DC_TX_LIST_CNT -
2555 		    (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) {
2556 			bus_dmamap_unload(sc->sc_dmat, map);
2557 			return (ENOBUFS);
2558 		}
2559 
2560 		f = &sc->dc_ldata->dc_tx_list[frag];
2561 		f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len);
2562 		if (cnt == 0) {
2563 			f->dc_status = htole32(0);
2564 			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
2565 		} else
2566 			f->dc_status = htole32(DC_TXSTAT_OWN);
2567 		f->dc_data = htole32(map->dm_segs[i].ds_addr);
2568 		cur = frag;
2569 		DC_INC(frag, DC_TX_LIST_CNT);
2570 		cnt++;
2571 	}
2572 
2573 	sc->dc_cdata.dc_tx_cnt += cnt;
2574 	sc->dc_cdata.dc_tx_chain[cur].sd_mbuf = m_head;
2575 	sc->sc_tx_sparemap = sc->dc_cdata.dc_tx_chain[cur].sd_map;
2576 	sc->dc_cdata.dc_tx_chain[cur].sd_map = map;
2577 	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
2578 	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
2579 		sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |=
2580 		    htole32(DC_TXCTL_FINT);
2581 	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
2582 		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
2583 		    htole32(DC_TXCTL_FINT);
2584 	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
2585 		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
2586 		    htole32(DC_TXCTL_FINT);
2587 #ifdef ALTQ
2588 	else if ((sc->dc_flags & DC_TX_USE_TX_INTR) &&
2589 		 TBR_IS_ENABLED(&sc->sc_arpcom.ac_if.if_snd))
2590 		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
2591 		    htole32(DC_TXCTL_FINT);
2592 #endif
2593 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2594 	    BUS_DMASYNC_PREWRITE);
2595 
2596 	sc->dc_ldata->dc_tx_list[*txidx].dc_status = htole32(DC_TXSTAT_OWN);
2597 
2598 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2599 	    offsetof(struct dc_list_data, dc_tx_list[*txidx]),
2600 	    sizeof(struct dc_desc) * cnt,
2601 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2602 
2603 	*txidx = frag;
2604 
2605 	return (0);
2606 }
2607 
2608 /*
2609  * Coalesce an mbuf chain into a single mbuf cluster buffer.
2610  * Needed for some really badly behaved chips that just can't
2611  * do scatter/gather correctly.
2612  */
2613 int
2614 dc_coal(sc, m_head)
2615 	struct dc_softc *sc;
2616 	struct mbuf **m_head;
2617 {
2618 	struct mbuf		*m_new, *m;
2619 
2620 	m = *m_head;
2621 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
2622 	if (m_new == NULL) {
2623 		printf("%s: no memory for tx list\n", sc->sc_dev.dv_xname);
2624 		return (ENOBUFS);
2625 	}
2626 	if (m->m_pkthdr.len > MHLEN) {
2627 		MCLGET(m_new, M_DONTWAIT);
2628 		if (!(m_new->m_flags & M_EXT)) {
2629 			m_freem(m_new);
2630 			printf("%s: no memory for tx list\n",
2631 			    sc->sc_dev.dv_xname);
2632 			return (ENOBUFS);
2633 		}
2634 	}
2635 	m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t));
2636 	m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len;
2637 	m_freem(m);
2638 	*m_head = m_new;
2639 
2640 	return (0);
2641 }
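
/*
 * dc_coal() is used from dc_start() below when DC_TX_COALESCE is set;
 * the packet is dequeued before coalescing, so a failure here drops it
 * rather than leaving a half-copied chain on the send queue.
 */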
2642 
2643 /*
2644  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2645  * to the mbuf data regions directly in the transmit lists. We also save a
2646  * copy of the pointers since the transmit list fragment pointers are
2647  * physical addresses.
2648  */
2649 
2650 void
2651 dc_start(ifp)
2652 	struct ifnet *ifp;
2653 {
2654 	struct dc_softc *sc;
2655 	struct mbuf *m_head = NULL;
2656 	int idx;
2657 
2658 	sc = ifp->if_softc;
2659 
2660 	if (!sc->dc_link)
2661 		return;
2662 
2663 	if (ifp->if_flags & IFF_OACTIVE)
2664 		return;
2665 
2666 	idx = sc->dc_cdata.dc_tx_prod;
2667 
2668 	while(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf == NULL) {
2669 		IFQ_POLL(&ifp->if_snd, m_head);
2670 		if (m_head == NULL)
2671 			break;
2672 
2673 		if (sc->dc_flags & DC_TX_COALESCE) {
2674 #ifdef ALTQ
2675 			/* note: dc_coal breaks the poll-and-dequeue rule.
2676 			 * if dc_coal fails, we lose the packet.
2677 			 */
2678 #endif
2679 			IFQ_DEQUEUE(&ifp->if_snd, m_head);
2680 			if (dc_coal(sc, &m_head)) {
2681 				ifp->if_flags |= IFF_OACTIVE;
2682 				break;
2683 			}
2684 		}
2685 
2686 		if (dc_encap(sc, m_head, &idx)) {
2687 			ifp->if_flags |= IFF_OACTIVE;
2688 			break;
2689 		}
2690 
2691 		/* now we are committed to transmit the packet */
2692 		if (sc->dc_flags & DC_TX_COALESCE) {
2693 			/* if mbuf is coalesced, it is already dequeued */
2694 		} else
2695 			IFQ_DEQUEUE(&ifp->if_snd, m_head);
2696 
2697 		/*
2698 		 * If there's a BPF listener, bounce a copy of this frame
2699 		 * to him.
2700 		 */
2701 #if NBPFILTER > 0
2702 		if (ifp->if_bpf)
2703 			bpf_mtap(ifp->if_bpf, m_head);
2704 #endif
2705 		if (sc->dc_flags & DC_TX_ONE) {
2706 			ifp->if_flags |= IFF_OACTIVE;
2707 			break;
2708 		}
2709 	}
2710 	if (idx == sc->dc_cdata.dc_tx_prod)
2711 		return;
2712 
2713 	/* Transmit */
2714 	sc->dc_cdata.dc_tx_prod = idx;
2715 	if (!(sc->dc_flags & DC_TX_POLL))
2716 		CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
2717 
2718 	/*
2719 	 * Set a timeout in case the chip goes out to lunch.
2720 	 */
2721 	ifp->if_timer = 5;
2722 }
2723 
2724 void
2725 dc_init(xsc)
2726 	void *xsc;
2727 {
2728 	struct dc_softc *sc = xsc;
2729 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2730 	struct mii_data *mii;
2731 	int s;
2732 
2733 	s = splimp();
2734 
2735 	mii = &sc->sc_mii;
2736 
2737 	/*
2738 	 * Cancel pending I/O and free all RX/TX buffers.
2739 	 */
2740 	dc_stop(sc);
2741 	dc_reset(sc);
2742 
2743 	/*
2744 	 * Set cache alignment and burst length.
2745 	 */
2746 	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
2747 		CSR_WRITE_4(sc, DC_BUSCTL, 0);
2748 	else
2749 		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE);
2750 	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
2751 		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
2752 	} else {
2753 		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
2754 	}
2755 	if (sc->dc_flags & DC_TX_POLL)
2756 		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
2757 	switch(sc->dc_cachesize) {
2758 	case 32:
2759 		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
2760 		break;
2761 	case 16:
2762 		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
2763 		break;
2764 	case 8:
2765 		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
2766 		break;
2767 	case 0:
2768 	default:
2769 		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
2770 		break;
2771 	}
2772 
2773 	if (sc->dc_flags & DC_TX_STORENFWD)
2774 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2775 	else {
2776 		if (sc->dc_txthresh == DC_TXTHRESH_160BYTES) {
2777 			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2778 		} else {
2779 			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2780 			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
2781 		}
2782 	}
2783 
2784 	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
2785 	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);
2786 
2787 	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
2788 		/*
2789 		 * The app notes for the 98713 and 98715A say that
2790 		 * in order to have the chips operate properly, a magic
2791 		 * number must be written to CSR16. Macronix does not
2792 		 * document the meaning of these bits so there's no way
2793 		 * to know exactly what they do. The 98713 has a magic
2794 		 * number all its own; the rest all use a different one.
2795 		 */
2796 		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
2797 		if (sc->dc_type == DC_TYPE_98713)
2798 			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
2799 		else
2800 			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
2801 	}
2802 
2803 	if (DC_IS_XIRCOM(sc)) {
2804 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
2805 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
2806 		DELAY(10);
2807 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
2808 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
2809 		DELAY(10);
2810 	}
2811 
2812 	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
2813 	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_72BYTES);
2814 
2815 	/* Init circular RX list. */
2816 	if (dc_list_rx_init(sc) == ENOBUFS) {
2817 		printf("%s: initialization failed: no "
2818 		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
2819 		dc_stop(sc);
2820 		splx(s);
2821 		return;
2822 	}
2823 
2824 	/*
2825 	 * Init tx descriptors.
2826 	 */
2827 	dc_list_tx_init(sc);
2828 
2829 	/*
2830 	 * Sync down both lists initialized.
2831 	 */
2832 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2833 	    0, sc->sc_listmap->dm_mapsize,
2834 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2835 
2836 	/*
2837 	 * Load the address of the RX list.
2838 	 */
2839 	CSR_WRITE_4(sc, DC_RXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
2840 	    offsetof(struct dc_list_data, dc_rx_list[0]));
2841 	CSR_WRITE_4(sc, DC_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
2842 	    offsetof(struct dc_list_data, dc_tx_list[0]));
2843 
2844 	/*
2845 	 * Enable interrupts.
2846 	 */
2847 	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
2848 	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);
2849 
2850 	/* Enable transmitter. */
2851 	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2852 
2853 	/*
2854 	 * If this is an Intel 21143 and we're not using the
2855 	 * MII port, program the LED control pins so we get
2856 	 * link and activity indications.
2857 	 */
2858 	if (sc->dc_flags & DC_TULIP_LEDS) {
2859 		CSR_WRITE_4(sc, DC_WATCHDOG,
2860 		    DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY);
2861 		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
2862 	}
2863 
2864 	/*
2865 	 * Load the RX/multicast filter. We do this sort of late
2866 	 * because the filter programming scheme on the 21143 and
2867 	 * some clones requires DMAing a setup frame via the TX
2868 	 * engine, and we need the transmitter enabled for that.
2869 	 */
2870 	dc_setfilt(sc);
2871 
2872 	/* Enable receiver. */
2873 	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
2874 	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);
2875 
2876 	mii_mediachg(mii);
2877 	dc_setcfg(sc, sc->dc_if_media);
2878 
2879 	ifp->if_flags |= IFF_RUNNING;
2880 	ifp->if_flags &= ~IFF_OACTIVE;
2881 
2882 	splx(s);
2883 
2884 	timeout_set(&sc->dc_tick_tmo, dc_tick, sc);
2885 
2886 	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
2887 		sc->dc_link = 1;
2888 	else {
2889 		if (sc->dc_flags & DC_21143_NWAY)
2890 			timeout_add(&sc->dc_tick_tmo, hz / 10);
2891 		else
2892 			timeout_add(&sc->dc_tick_tmo, hz);
2893 	}
2894 
2895 #ifdef SRM_MEDIA
2896 	if(sc->dc_srm_media) {
2897 		struct ifreq ifr;
2898 
2899 		ifr.ifr_media = sc->dc_srm_media;
2900 		ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
2901 		sc->dc_srm_media = 0;
2902 	}
2903 #endif
2904 }
2905 
2906 /*
2907  * Set media options.
2908  */
2909 int
2910 dc_ifmedia_upd(ifp)
2911 	struct ifnet *ifp;
2912 {
2913 	struct dc_softc *sc;
2914 	struct mii_data *mii;
2915 	struct ifmedia *ifm;
2916 
2917 	sc = ifp->if_softc;
2918 	mii = &sc->sc_mii;
2919 	mii_mediachg(mii);
2920 
2921 	ifm = &mii->mii_media;
2922 
2923 	if (DC_IS_DAVICOM(sc) &&
2924 	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
2925 		dc_setcfg(sc, ifm->ifm_media);
2926 	else
2927 		sc->dc_link = 0;
2928 
2929 	return (0);
2930 }
2931 
2932 /*
2933  * Report current media status.
2934  */
2935 void
2936 dc_ifmedia_sts(ifp, ifmr)
2937 	struct ifnet *ifp;
2938 	struct ifmediareq *ifmr;
2939 {
2940 	struct dc_softc *sc;
2941 	struct mii_data *mii;
2942 	struct ifmedia *ifm;
2943 
2944 	sc = ifp->if_softc;
2945 	mii = &sc->sc_mii;
2946 	mii_pollstat(mii);
2947 	ifm = &mii->mii_media;
2948 	if (DC_IS_DAVICOM(sc)) {
2949 		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
2950 			ifmr->ifm_active = ifm->ifm_media;
2951 			ifmr->ifm_status = 0;
2952 			return;
2953 		}
2954 	}
2955 	ifmr->ifm_active = mii->mii_media_active;
2956 	ifmr->ifm_status = mii->mii_media_status;
2957 }
2958 
2959 int
2960 dc_ioctl(ifp, command, data)
2961 	struct ifnet *ifp;
2962 	u_long command;
2963 	caddr_t data;
2964 {
2965 	struct dc_softc		*sc = ifp->if_softc;
2966 	struct ifreq		*ifr = (struct ifreq *) data;
2967 	struct ifaddr		*ifa = (struct ifaddr *)data;
2968 	struct mii_data		*mii;
2969 	int			s, error = 0;
2970 
2971 	s = splimp();
2972 
2973 	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
2974 		splx(s);
2975 		return (error);
2976 	}
2977 
2978 	switch(command) {
2979 	case SIOCSIFADDR:
2980 		ifp->if_flags |= IFF_UP;
2981 		switch (ifa->ifa_addr->sa_family) {
2982 		case AF_INET:
2983 			dc_init(sc);
2984 			arp_ifinit(&sc->sc_arpcom, ifa);
2985 			break;
2986 		default:
2987 			dc_init(sc);
2988 			break;
2989 		}
2990 		break;
2991 	case SIOCSIFFLAGS:
2992 		if (ifp->if_flags & IFF_UP) {
2993 			if (ifp->if_flags & IFF_RUNNING &&
2994 			    ifp->if_flags & IFF_PROMISC &&
2995 			    !(sc->dc_if_flags & IFF_PROMISC)) {
2996 				dc_setfilt(sc);
2997 			} else if (ifp->if_flags & IFF_RUNNING &&
2998 			    !(ifp->if_flags & IFF_PROMISC) &&
2999 			    sc->dc_if_flags & IFF_PROMISC) {
3000 				dc_setfilt(sc);
3001 			} else if (!(ifp->if_flags & IFF_RUNNING)) {
3002 				sc->dc_txthresh = 0;
3003 				dc_init(sc);
3004 			}
3005 		} else {
3006 			if (ifp->if_flags & IFF_RUNNING)
3007 				dc_stop(sc);
3008 		}
3009 		sc->dc_if_flags = ifp->if_flags;
3010 		error = 0;
3011 		break;
3012 	case SIOCADDMULTI:
3013 	case SIOCDELMULTI:
3014 		error = (command == SIOCADDMULTI) ?
3015 		    ether_addmulti(ifr, &sc->sc_arpcom) :
3016 		    ether_delmulti(ifr, &sc->sc_arpcom);
3017 
3018 		if (error == ENETRESET) {
3019 			/*
3020 			 * Multicast list has changed; set the hardware
3021 			 * filter accordingly.
3022 			 */
3023 			dc_setfilt(sc);
3024 			error = 0;
3025 		}
3026 		break;
3027 	case SIOCGIFMEDIA:
3028 	case SIOCSIFMEDIA:
3029 		mii = &sc->sc_mii;
3030 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3031 #ifdef SRM_MEDIA
3032 		if (sc->dc_srm_media)
3033 			sc->dc_srm_media = 0;
3034 #endif
3035 		break;
3036 	default:
3037 		error = EINVAL;
3038 		break;
3039 	}
3040 
3041 	splx(s);
3042 
3043 	return (error);
3044 }
3045 
3046 void
3047 dc_watchdog(ifp)
3048 	struct ifnet *ifp;
3049 {
3050 	struct dc_softc *sc;
3051 
3052 	sc = ifp->if_softc;
3053 
3054 	ifp->if_oerrors++;
3055 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
3056 
3057 	dc_stop(sc);
3058 	dc_reset(sc);
3059 	dc_init(sc);
3060 
3061 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
3062 		dc_start(ifp);
3063 }
3064 
3065 /*
3066  * Stop the adapter and free any mbufs allocated to the
3067  * RX and TX lists.
3068  */
3069 void
3070 dc_stop(sc)
3071 	struct dc_softc *sc;
3072 {
3073 	struct ifnet *ifp;
3074 	int i;
3075 
3076 	ifp = &sc->sc_arpcom.ac_if;
3077 	ifp->if_timer = 0;
3078 
3079 	timeout_del(&sc->dc_tick_tmo);
3080 
3081 	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON));
3082 	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
3083 	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
3084 	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
3085 	sc->dc_link = 0;
3086 
3087 	/*
3088 	 * Free data in the RX lists.
3089 	 */
3090 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
3091 		if (sc->dc_cdata.dc_rx_chain[i].sd_map->dm_nsegs != 0) {
3092 			bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map;
3093 
3094 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3095 			    BUS_DMASYNC_POSTREAD);
3096 			bus_dmamap_unload(sc->sc_dmat, map);
3097 		}
3098 		if (sc->dc_cdata.dc_rx_chain[i].sd_mbuf != NULL) {
3099 			m_freem(sc->dc_cdata.dc_rx_chain[i].sd_mbuf);
3100 			sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
3101 		}
3102 	}
3103 	bzero((char *)&sc->dc_ldata->dc_rx_list,
3104 		sizeof(sc->dc_ldata->dc_rx_list));
3105 
3106 	/*
3107 	 * Free the TX list buffers.
3108 	 */
3109 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
3110 		if (sc->dc_cdata.dc_tx_chain[i].sd_map->dm_nsegs != 0) {
3111 			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map;
3112 
3113 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3114 			    BUS_DMASYNC_POSTWRITE);
3115 			bus_dmamap_unload(sc->sc_dmat, map);
3116 		}
3117 		if (sc->dc_cdata.dc_tx_chain[i].sd_mbuf != NULL) {
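			/*
			 * Setup-frame descriptors don't point at a normal
			 * mbuf chain, so just clear the pointer instead of
			 * calling m_freem() on it.
			 */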
3118 			if (sc->dc_ldata->dc_tx_list[i].dc_ctl &
3119 			    htole32(DC_TXCTL_SETUP)) {
3120 				sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
3121 				continue;
3122 			}
3123 			m_freem(sc->dc_cdata.dc_tx_chain[i].sd_mbuf);
3124 			sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
3125 		}
3126 	}
3127 	bzero((char *)&sc->dc_ldata->dc_tx_list,
3128 		sizeof(sc->dc_ldata->dc_tx_list));
3129 
3130 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
3131 	    0, sc->sc_listmap->dm_mapsize,
3132 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3133 
3134 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3135 }
3136 
3137 /*
3138  * Stop all chip I/O so that the kernel's probe routines don't
3139  * get confused by errant DMAs when rebooting.
3140  */
3141 void
3142 dc_shutdown(v)
3143 	void *v;
3144 {
3145 	struct dc_softc *sc = (struct dc_softc *)v;
3146 
3147 	dc_stop(sc);
3148 }
3149 
3150 struct cfdriver dc_cd = {
3151 	0, "dc", DV_IFNET
3152 };
3153