xref: /openbsd-src/sys/dev/ic/dc.c (revision 2b0358df1d88d06ef4139321dd05bd5e05d91eaf)
1 /*	$OpenBSD: dc.c,v 1.108 2009/01/11 16:54:59 blambert Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_dc.c,v 1.43 2001/01/19 23:55:07 wpaul Exp $
35  */
36 
37 /*
38  * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
39  * series chips and several workalikes including the following:
40  *
41  * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
42  * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
43  * Lite-On 82c168/82c169 PNIC (www.litecom.com)
44  * ASIX Electronics AX88140A (www.asix.com.tw)
45  * ASIX Electronics AX88141 (www.asix.com.tw)
46  * ADMtek AL981 (www.admtek.com.tw)
47  * ADMtek AN983 (www.admtek.com.tw)
48  * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
49  * Accton EN1217, EN2242 (www.accton.com)
50  * Xircom X3201 (www.xircom.com)
51  *
52  * Datasheets for the 21143 are available at developer.intel.com.
53  * Datasheets for the clone parts can be found at their respective sites.
54  * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
55  * The PNIC II is essentially a Macronix 98715A chip; the only difference
56  * worth noting is that its multicast hash table is only 128 bits wide
57  * instead of 512.
58  *
59  * Written by Bill Paul <wpaul@ee.columbia.edu>
60  * Electrical Engineering Department
61  * Columbia University, New York City
62  */
63 
64 /*
65  * The Intel 21143 is the successor to the DEC 21140. It is basically
66  * the same as the 21140 but with a few new features. The 21143 supports
67  * three kinds of media attachments:
68  *
69  * o MII port, for 10Mbps and 100Mbps support and NWAY
70  *   autonegotiation provided by an external PHY.
71  * o SYM port, for symbol mode 100Mbps support.
72  * o 10baseT port.
73  * o AUI/BNC port.
74  *
75  * The 100Mbps SYM port and 10baseT port can be used together in
76  * combination with the internal NWAY support to create a 10/100
77  * autosensing configuration.
78  *
79  * Note that not all tulip workalikes are handled in this driver: we only
80  * deal with those which are relatively well behaved. The Winbond is
81  * handled separately due to its different register offsets and the
82  * special handling needed for its various bugs. The PNIC is handled
83  * here, but I'm not thrilled about it.
84  *
85  * All of the workalike chips use some form of MII transceiver support
86  * with the exception of the Macronix chips, which also have a SYM port.
87  * The ASIX AX88140A is also documented to have a SYM port, but all
88  * the cards I've seen use an MII transceiver, probably because the
89  * AX88140A doesn't support internal NWAY.
90  */
91 
92 #include "bpfilter.h"
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/mbuf.h>
97 #include <sys/protosw.h>
98 #include <sys/socket.h>
99 #include <sys/ioctl.h>
100 #include <sys/errno.h>
101 #include <sys/malloc.h>
102 #include <sys/kernel.h>
103 #include <sys/device.h>
104 #include <sys/timeout.h>
105 
106 #include <net/if.h>
107 #include <net/if_dl.h>
108 #include <net/if_types.h>
109 
110 #ifdef INET
111 #include <netinet/in.h>
112 #include <netinet/in_systm.h>
113 #include <netinet/in_var.h>
114 #include <netinet/ip.h>
115 #include <netinet/if_ether.h>
116 #endif
117 
118 #include <net/if_media.h>
119 
120 #if NBPFILTER > 0
121 #include <net/bpf.h>
122 #endif
123 
124 #include <dev/mii/mii.h>
125 #include <dev/mii/miivar.h>
126 
127 #include <machine/bus.h>
128 #include <dev/pci/pcidevs.h>
129 
130 #include <dev/ic/dcreg.h>
131 
132 int dc_intr(void *);
133 void dc_shutdown(void *);
134 void dc_power(int, void *);
135 struct dc_type *dc_devtype(void *);
136 int dc_newbuf(struct dc_softc *, int, struct mbuf *);
137 int dc_encap(struct dc_softc *, struct mbuf *, u_int32_t *);
138 int dc_coal(struct dc_softc *, struct mbuf **);
139 
140 void dc_pnic_rx_bug_war(struct dc_softc *, int);
141 int dc_rx_resync(struct dc_softc *);
142 void dc_rxeof(struct dc_softc *);
143 void dc_txeof(struct dc_softc *);
144 void dc_tick(void *);
145 void dc_tx_underrun(struct dc_softc *);
146 void dc_start(struct ifnet *);
147 int dc_ioctl(struct ifnet *, u_long, caddr_t);
148 void dc_init(void *);
149 void dc_stop(struct dc_softc *);
150 void dc_watchdog(struct ifnet *);
151 int dc_ifmedia_upd(struct ifnet *);
152 void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);
153 
154 void dc_delay(struct dc_softc *);
155 void dc_eeprom_width(struct dc_softc *);
156 void dc_eeprom_idle(struct dc_softc *);
157 void dc_eeprom_putbyte(struct dc_softc *, int);
158 void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
159 void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
160 void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *);
161 void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);
162 
163 void dc_mii_writebit(struct dc_softc *, int);
164 int dc_mii_readbit(struct dc_softc *);
165 void dc_mii_sync(struct dc_softc *);
166 void dc_mii_send(struct dc_softc *, u_int32_t, int);
167 int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
168 int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
169 int dc_miibus_readreg(struct device *, int, int);
170 void dc_miibus_writereg(struct device *, int, int, int);
171 void dc_miibus_statchg(struct device *);
172 
173 void dc_setcfg(struct dc_softc *, int);
174 u_int32_t dc_crc_le(struct dc_softc *, caddr_t);
175 u_int32_t dc_crc_be(caddr_t);
176 void dc_setfilt_21143(struct dc_softc *);
177 void dc_setfilt_asix(struct dc_softc *);
178 void dc_setfilt_admtek(struct dc_softc *);
179 void dc_setfilt_xircom(struct dc_softc *);
180 
181 void dc_setfilt(struct dc_softc *);
182 
183 void dc_reset(struct dc_softc *);
184 int dc_list_rx_init(struct dc_softc *);
185 int dc_list_tx_init(struct dc_softc *);
186 
187 void dc_read_srom(struct dc_softc *, int);
188 void dc_parse_21143_srom(struct dc_softc *);
189 void dc_decode_leaf_sia(struct dc_softc *,
190 				     struct dc_eblock_sia *);
191 void dc_decode_leaf_mii(struct dc_softc *,
192 				     struct dc_eblock_mii *);
193 void dc_decode_leaf_sym(struct dc_softc *,
194 				     struct dc_eblock_sym *);
195 void dc_apply_fixup(struct dc_softc *, int);
196 
/* Read-modify-write helpers: set or clear bits in a 32-bit CSR. */
#define DC_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define DC_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

/* Shorthands for bit-banging the serial I/O (SIO) register. */
#define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
#define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))
205 
206 void
207 dc_delay(struct dc_softc *sc)
208 {
209 	int idx;
210 
211 	for (idx = (300 / 33) + 1; idx > 0; idx--)
212 		CSR_READ_4(sc, DC_BUSCTL);
213 }
214 
/*
 * Probe the attached serial EEPROM for its address width (number of
 * address bits) and record the result in sc->dc_romwidth, which
 * dc_eeprom_putbyte() uses when clocking out read addresses.
 */
void
dc_eeprom_width(struct dc_softc *sc)
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Clock out the 3-bit READ opcode (6 == binary 110), MSB first. */
	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Clock out zero address bits until the EEPROM drives DATAOUT
	 * low; the number of clocks taken is its address width.
	 */
	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	/* Implausible answer: fall back to the common 6-bit width. */
	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}
278 
/*
 * Force the EEPROM into its idle state: select the ROM interface,
 * assert chip select, strobe the clock 25 times, then deselect the
 * part and clear the SIO register entirely.
 */
void
dc_eeprom_idle(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* 25 clock pulses with chip select held high. */
	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);
}
306 
307 /*
308  * Send a read command and address to the EEPROM, check for ACK.
309  */
310 void
311 dc_eeprom_putbyte(struct dc_softc *sc, int addr)
312 {
313 	int d, i;
314 
315 	d = DC_EECMD_READ >> 6;
316 
317 	for (i = 3; i--; ) {
318 		if (d & (1 << i))
319 			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
320 		else
321 			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
322 		dc_delay(sc);
323 		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
324 		dc_delay(sc);
325 		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
326 		dc_delay(sc);
327 	}
328 
329 	/*
330 	 * Feed in each bit and strobe the clock.
331 	 */
332 	for (i = sc->dc_romwidth; i--;) {
333 		if (addr & (1 << i)) {
334 			SIO_SET(DC_SIO_EE_DATAIN);
335 		} else {
336 			SIO_CLR(DC_SIO_EE_DATAIN);
337 		}
338 		dc_delay(sc);
339 		SIO_SET(DC_SIO_EE_CLK);
340 		dc_delay(sc);
341 		SIO_CLR(DC_SIO_EE_CLK);
342 		dc_delay(sc);
343 	}
344 }
345 
346 /*
347  * Read a word of data stored in the EEPROM at address 'addr.'
348  * The PNIC 82c168/82c169 has its own non-standard way to read
349  * the EEPROM.
350  */
351 void
352 dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest)
353 {
354 	int i;
355 	u_int32_t r;
356 
357 	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr);
358 
359 	for (i = 0; i < DC_TIMEOUT; i++) {
360 		DELAY(1);
361 		r = CSR_READ_4(sc, DC_SIO);
362 		if (!(r & DC_PN_SIOCTL_BUSY)) {
363 			*dest = (u_int16_t)(r & 0xFFFF);
364 			return;
365 		}
366 	}
367 }
368 
369 /*
370  * Read a word of data stored in the EEPROM at address 'addr.'
371  * The Xircom X3201 has its own non-standard way to read
372  * the EEPROM, too.
373  */
374 void
375 dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest)
376 {
377 	SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
378 
379 	addr *= 2;
380 	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
381 	*dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
382 	addr += 1;
383 	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
384 	*dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;
385 
386 	SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
387 }
388 
389 /*
390  * Read a word of data stored in the EEPROM at address 'addr.'
391  */
392 void
393 dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest)
394 {
395 	int i;
396 	u_int16_t word = 0;
397 
398 	/* Force EEPROM to idle state. */
399 	dc_eeprom_idle(sc);
400 
401 	/* Enter EEPROM access mode. */
402 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
403 	dc_delay(sc);
404 	DC_SETBIT(sc, DC_SIO,  DC_SIO_ROMCTL_READ);
405 	dc_delay(sc);
406 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
407 	dc_delay(sc);
408 	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
409 	dc_delay(sc);
410 
411 	/*
412 	 * Send address of word we want to read.
413 	 */
414 	dc_eeprom_putbyte(sc, addr);
415 
416 	/*
417 	 * Start reading bits from EEPROM.
418 	 */
419 	for (i = 0x8000; i; i >>= 1) {
420 		SIO_SET(DC_SIO_EE_CLK);
421 		dc_delay(sc);
422 		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
423 			word |= i;
424 		dc_delay(sc);
425 		SIO_CLR(DC_SIO_EE_CLK);
426 		dc_delay(sc);
427 	}
428 
429 	/* Turn off EEPROM access mode. */
430 	dc_eeprom_idle(sc);
431 
432 	*dest = word;
433 }
434 
435 /*
436  * Read a sequence of words from the EEPROM.
437  */
438 void
439 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt,
440     int swap)
441 {
442 	int i;
443 	u_int16_t word = 0, *ptr;
444 
445 	for (i = 0; i < cnt; i++) {
446 		if (DC_IS_PNIC(sc))
447 			dc_eeprom_getword_pnic(sc, off + i, &word);
448 		else if (DC_IS_XIRCOM(sc))
449 			dc_eeprom_getword_xircom(sc, off + i, &word);
450 		else
451 			dc_eeprom_getword(sc, off + i, &word);
452 		ptr = (u_int16_t *)(dest + (i * 2));
453 		if (swap)
454 			*ptr = betoh16(word);
455 		else
456 			*ptr = letoh16(word);
457 	}
458 }
459 
460 /*
461  * The following two routines are taken from the Macronix 98713
462  * Application Notes pp.19-21.
463  */
464 /*
465  * Write a bit to the MII bus.
466  */
467 void
468 dc_mii_writebit(struct dc_softc *sc, int bit)
469 {
470 	if (bit)
471 		CSR_WRITE_4(sc, DC_SIO,
472 		    DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
473 	else
474 		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
475 
476 	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
477 	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
478 }
479 
480 /*
481  * Read a bit from the MII bus.
482  */
483 int
484 dc_mii_readbit(struct dc_softc *sc)
485 {
486 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
487 	CSR_READ_4(sc, DC_SIO);
488 	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
489 	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
490 	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
491 		return (1);
492 	return (0);
493 }
494 
495 /*
496  * Sync the PHYs by setting data bit and strobing the clock 32 times.
497  */
498 void
499 dc_mii_sync(struct dc_softc *sc)
500 {
501 	int i;
502 
503 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
504 
505 	for (i = 0; i < 32; i++)
506 		dc_mii_writebit(sc, 1);
507 }
508 
509 /*
510  * Clock a series of bits through the MII.
511  */
512 void
513 dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt)
514 {
515 	int i;
516 
517 	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
518 		dc_mii_writebit(sc, bits & i);
519 }
520 
521 /*
522  * Read an PHY register through the MII.
523  */
524 int
525 dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
526 {
527 	int i, ack, s;
528 
529 	s = splnet();
530 
531 	/*
532 	 * Set up frame for RX.
533 	 */
534 	frame->mii_stdelim = DC_MII_STARTDELIM;
535 	frame->mii_opcode = DC_MII_READOP;
536 	frame->mii_turnaround = 0;
537 	frame->mii_data = 0;
538 
539 	/*
540 	 * Sync the PHYs.
541 	 */
542 	dc_mii_sync(sc);
543 
544 	/*
545 	 * Send command/address info.
546 	 */
547 	dc_mii_send(sc, frame->mii_stdelim, 2);
548 	dc_mii_send(sc, frame->mii_opcode, 2);
549 	dc_mii_send(sc, frame->mii_phyaddr, 5);
550 	dc_mii_send(sc, frame->mii_regaddr, 5);
551 
552 #ifdef notdef
553 	/* Idle bit */
554 	dc_mii_writebit(sc, 1);
555 	dc_mii_writebit(sc, 0);
556 #endif
557 
558 	/* Check for ack */
559 	ack = dc_mii_readbit(sc);
560 
561 	/*
562 	 * Now try reading data bits. If the ack failed, we still
563 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
564 	 */
565 	if (ack) {
566 		for(i = 0; i < 16; i++) {
567 			dc_mii_readbit(sc);
568 		}
569 		goto fail;
570 	}
571 
572 	for (i = 0x8000; i; i >>= 1) {
573 		if (!ack) {
574 			if (dc_mii_readbit(sc))
575 				frame->mii_data |= i;
576 		}
577 	}
578 
579 fail:
580 
581 	dc_mii_writebit(sc, 0);
582 	dc_mii_writebit(sc, 0);
583 
584 	splx(s);
585 
586 	if (ack)
587 		return (1);
588 	return (0);
589 }
590 
591 /*
592  * Write to a PHY register through the MII.
593  */
594 int
595 dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
596 {
597 	int s;
598 
599 	s = splnet();
600 	/*
601 	 * Set up frame for TX.
602 	 */
603 
604 	frame->mii_stdelim = DC_MII_STARTDELIM;
605 	frame->mii_opcode = DC_MII_WRITEOP;
606 	frame->mii_turnaround = DC_MII_TURNAROUND;
607 
608 	/*
609 	 * Sync the PHYs.
610 	 */
611 	dc_mii_sync(sc);
612 
613 	dc_mii_send(sc, frame->mii_stdelim, 2);
614 	dc_mii_send(sc, frame->mii_opcode, 2);
615 	dc_mii_send(sc, frame->mii_phyaddr, 5);
616 	dc_mii_send(sc, frame->mii_regaddr, 5);
617 	dc_mii_send(sc, frame->mii_turnaround, 2);
618 	dc_mii_send(sc, frame->mii_data, 16);
619 
620 	/* Idle bit. */
621 	dc_mii_writebit(sc, 0);
622 	dc_mii_writebit(sc, 0);
623 
624 	splx(s);
625 	return (0);
626 }
627 
/*
 * miibus glue: read a PHY register on behalf of the MII layer.
 * Handles the chip-specific access methods (PNIC MDIO engine,
 * Comet direct-mapped registers, faked PHY when not in MII mode)
 * before falling back to the generic bit-banged path.
 */
int
dc_miibus_readreg(struct device *self, int phy, int reg)
{
	struct dc_mii_frame frame;
	struct dc_softc *sc = (struct dc_softc *)self;
	int i, rval, phy_reg;

	/*
	 * Note: both the AL981 and AN983 have internal PHYs,
	 * however the AL981 provides direct access to the PHY
	 * registers while the AN983 uses a serial MII interface.
	 * The AN983's MII interface is also buggy in that you
	 * can read from any MII address (0 to 31), but only address 1
	 * behaves normally. To deal with both cases, we pretend
	 * that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return (0);

	/*
	 * Note: the ukphy probes of the RS7112 report a PHY at
	 * MII address 0 (possibly HomePNA?) and 1 (ethernet)
	 * so we only respond to the correct one.
	 */
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return (0);

	/*
	 * Not in MII mode: synthesize a fake PHY at the highest MII
	 * address so the probe code still finds something.
	 */
	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch(reg) {
			case MII_BMSR:
				/*
				 * Fake something to make the probe
				 * code think there's a PHY here.
				 */
				return (BMSR_MEDIAMASK);
				break;
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return (PCI_VENDOR_LITEON);
				return (PCI_VENDOR_DEC);
				break;
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return (PCI_PRODUCT_LITEON_PNIC);
				return (PCI_PRODUCT_DEC_21142);
				break;
			default:
				return (0);
				break;
			}
		} else
			return (0);
	}

	/* The PNIC has its own MDIO engine: post the read and poll. */
	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				/* An all-ones result is treated as failure. */
				return (rval == 0xFFFF ? 0 : rval);
			}
		}
		return (0);
	}

	/* Comet (AL981): the PHY registers are mapped directly into CSRs. */
	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_read: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return (0);
			break;
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;

		if (rval == 0xFFFF)
			return (0);
		return (rval);
	}

	/* Generic bit-banged MDIO path. */
	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	/*
	 * NOTE(review): on the 98713 PORTSEL is cleared around the MII
	 * access and restored afterwards, presumably because it
	 * interferes with the serial MII pins — confirm against the
	 * Macronix 98713 documentation.
	 */
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_readreg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (frame.mii_data);
}
748 
/*
 * miibus glue: write a PHY register on behalf of the MII layer.
 * Mirrors the chip-specific dispatch in dc_miibus_readreg().
 */
void
dc_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct dc_softc *sc = (struct dc_softc *)self;
	struct dc_mii_frame frame;
	int i, phy_reg;

	bzero((char *)&frame, sizeof(frame));

	/* Only the fixed internal PHY address is valid on these chips. */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return;
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return;

	/* PNIC: post the write to its MDIO engine and wait for idle. */
	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return;
	}

	/* Comet (AL981): PHY registers are mapped directly into CSRs. */
	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_write: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return;
			break;
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return;
	}

	/* Generic bit-banged MDIO path. */
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	/* 98713: clear PORTSEL around the MII access (see readreg). */
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_writereg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
}
819 
820 void
821 dc_miibus_statchg(struct device *self)
822 {
823 	struct dc_softc *sc = (struct dc_softc *)self;
824 	struct mii_data *mii;
825 	struct ifmedia *ifm;
826 
827 	if (DC_IS_ADMTEK(sc))
828 		return;
829 
830 	mii = &sc->sc_mii;
831 	ifm = &mii->mii_media;
832 	if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
833 		dc_setcfg(sc, ifm->ifm_media);
834 		sc->dc_if_media = ifm->ifm_media;
835 	} else {
836 		dc_setcfg(sc, mii->mii_media_active);
837 		sc->dc_if_media = mii->mii_media_active;
838 	}
839 }
840 
/* Hash-table index widths (in bits) for the various chips. */
#define DC_BITS_512	9
#define DC_BITS_128	7
#define DC_BITS_64	6

/*
 * Compute the little-endian CRC32 of an Ethernet address and reduce
 * it to an index into the chip's multicast hash table.  The table
 * width differs per chip, hence the flag-dependent masking.
 */
u_int32_t
dc_crc_le(struct dc_softc *sc, caddr_t addr)
{
	u_int32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);

	/*
	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
	 * chips is only 128 bits wide.
	 */
	if (sc->dc_flags & DC_128BIT_HASH)
		return (crc & ((1 << DC_BITS_128) - 1));

	/* The hash table on the MX98715BEC is only 64 bits wide. */
	if (sc->dc_flags & DC_64BIT_HASH)
		return (crc & ((1 << DC_BITS_64) - 1));

	/* Xircom's hash filtering table is different (read: weird) */
	/* Xircom uses the LEAST significant bits */
	if (DC_IS_XIRCOM(sc)) {
		if ((crc & 0x180) == 0x180)
			return (crc & 0x0F) + (crc & 0x70)*3 + (14 << 4);
		else
			return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4);
	}

	/* Default: full 512-bit table. */
	return (crc & ((1 << DC_BITS_512) - 1));
}
875 
876 /*
877  * Calculate CRC of a multicast group address, return the lower 6 bits.
878  */
879 #define dc_crc_be(addr)	((ether_crc32_be(addr,ETHER_ADDR_LEN) >> 26) \
880 	& 0x0000003F)
881 
882 /*
883  * 21143-style RX filter setup routine. Filter programming is done by
884  * downloading a special setup frame into the TX engine. 21143, Macronix,
885  * PNIC, PNIC II and Davicom chips are programmed this way.
886  *
887  * We always program the chip using 'hash perfect' mode, i.e. one perfect
888  * address (our node address) and a 512-bit hash filter for multicast
889  * frames. We also sneak the broadcast address into the hash filter since
890  * we need that too.
891  */
892 void
893 dc_setfilt_21143(struct dc_softc *sc)
894 {
895 	struct dc_desc *sframe;
896 	u_int32_t h, *sp;
897 	struct arpcom *ac = &sc->sc_arpcom;
898 	struct ether_multi *enm;
899 	struct ether_multistep step;
900 	struct ifnet *ifp;
901 	int i;
902 
903 	ifp = &sc->sc_arpcom.ac_if;
904 
905 	i = sc->dc_cdata.dc_tx_prod;
906 	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
907 	sc->dc_cdata.dc_tx_cnt++;
908 	sframe = &sc->dc_ldata->dc_tx_list[i];
909 	sp = &sc->dc_ldata->dc_sbuf[0];
910 	bzero((char *)sp, DC_SFRAME_LEN);
911 
912 	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
913 	    offsetof(struct dc_list_data, dc_sbuf));
914 	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
915 	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);
916 
917 	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
918 	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];
919 
920 	/* If we want promiscuous mode, set the allframes bit. */
921 	if (ifp->if_flags & IFF_PROMISC)
922 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
923 	else
924 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
925 
926 allmulti:
927 	if (ifp->if_flags & IFF_ALLMULTI)
928 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
929 	else {
930 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
931 
932 		ETHER_FIRST_MULTI(step, ac, enm);
933 		while (enm != NULL) {
934 			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
935 			    ETHER_ADDR_LEN)) {
936 				ifp->if_flags |= IFF_ALLMULTI;
937 				goto allmulti;
938 			}
939 
940 			h = dc_crc_le(sc, enm->enm_addrlo);
941 			sp[h >> 4] |= htole32(1 << (h & 0xF));
942 			ETHER_NEXT_MULTI(step, enm);
943 		}
944 	}
945 
946 	if (ifp->if_flags & IFF_BROADCAST) {
947 		h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
948 		sp[h >> 4] |= htole32(1 << (h & 0xF));
949 	}
950 
951 	/* Set our MAC address */
952 	sp[39] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
953 	sp[40] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
954 	sp[41] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);
955 
956 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
957 	    offsetof(struct dc_list_data, dc_sbuf[0]),
958 	    sizeof(struct dc_list_data) -
959 	    offsetof(struct dc_list_data, dc_sbuf[0]),
960 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
961 
962 	sframe->dc_status = htole32(DC_TXSTAT_OWN);
963 
964 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
965 	    offsetof(struct dc_list_data, dc_tx_list[i]),
966 	    sizeof(struct dc_desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
967 
968 	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
969 
970 	/*
971 	 * The PNIC takes an exceedingly long time to process its
972 	 * setup frame; wait 10ms after posting the setup frame
973 	 * before proceeding, just so it has time to swallow its
974 	 * medicine.
975 	 */
976 	DELAY(10000);
977 
978 	ifp->if_timer = 5;
979 }
980 
/*
 * ADMtek RX filter setup: the station address and the two 32-bit
 * multicast hash registers are programmed directly through CSRs
 * (no setup frame needed).
 */
void
dc_setfilt_admtek(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = &sc->sc_arpcom.ac_if;

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AL_PAR0, ac->ac_enaddr[3] << 24 |
	    ac->ac_enaddr[2] << 16 | ac->ac_enaddr[1] << 8 | ac->ac_enaddr[0]);
	CSR_WRITE_4(sc, DC_AL_PAR1, ac->ac_enaddr[5] << 8 | ac->ac_enaddr[4]);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

allmulti:
	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
	CSR_WRITE_4(sc, DC_AL_MAR1, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		return;

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/* Address ranges can't be hashed; fall back to allmulti. */
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}

		/* Centaur parts hash with the LE CRC, others with BE. */
		if (DC_IS_CENTAUR(sc))
			h = dc_crc_le(sc, enm->enm_addrlo);
		else
			h = dc_crc_be(enm->enm_addrlo);
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
}
1043 
/*
 * ASIX RX filter setup: the station address and multicast hash are
 * programmed through an index/data register pair (FILTIDX/FILTDATA)
 * rather than a setup frame.
 */
void
dc_setfilt_asix(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = &sc->sc_arpcom.ac_if;

	/* Init our MAC address (written as two 32-bit words). */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		return;

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/* 6-bit BE CRC hash selects one of 64 filter bits. */
		h = dc_crc_be(enm->enm_addrlo);
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}
1113 
/*
 * Program the RX filter on Xircom chips via a "setup frame": a special
 * descriptor queued on the TX ring whose buffer holds the perfect
 * filter / hash table contents.  TX and RX are stopped while the frame
 * is queued and restarted afterwards.  The statement ordering below is
 * significant (ownership handoff to the chip comes last).
 */
void
dc_setfilt_xircom(struct dc_softc *sc)
{
	struct dc_desc *sframe;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t h, *sp;
	struct ifnet *ifp;
	int i;

	ifp = &sc->sc_arpcom.ac_if;
	/* Quiesce the chip while we rewrite the filter. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

	/* Claim the next TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = &sc->dc_ldata->dc_sbuf[0];
	bzero((char *)sp, DC_SFRAME_LEN);

	/* Point the descriptor at the shared setup buffer (hash filter). */
	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_sbuf));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/*
	 * Stash a non-NULL marker so txeof treats this slot as
	 * consumed; the setup buffer is not a real mbuf.
	 */
	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* Set the multicast hash bits in the setup buffer. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = dc_crc_le(sc, enm->enm_addrlo);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Broadcast is accepted via its own hash bit on this chip. */
	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address (perfect filter slots 0-2). */
	sp[0] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
	sp[1] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
	sp[2] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_flags |= IFF_RUNNING;
	/* Hand the setup descriptor to the chip and poke TX. */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * wait some time...
	 */
	DELAY(1000);

	/* Arm the watchdog in case the setup frame never completes. */
	ifp->if_timer = 5;
}
1185 
/*
 * Program the RX filter using whichever method this chip family
 * requires.  The DC_IS_* predicates all test sc->dc_type, which
 * holds a single chip identifier, so at most one branch fires.
 */
void
dc_setfilt(struct dc_softc *sc)
{
	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);
	else if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);
	else if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);
	else if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}
1202 
1203 /*
1204  * In order to fiddle with the
1205  * 'full-duplex' and '100Mbps' bits in the netconfig register, we
1206  * first have to put the transmit and/or receive logic in the idle state.
1207  */
/*
 * Reprogram DC_NETCFG for the given media word.  The chip requires
 * the TX/RX engines to be idled before the speed/duplex bits may be
 * changed; we idle them, rewrite the config, then restart if they
 * were previously running.
 */
void
dc_setcfg(struct dc_softc *sc, int media)
{
	int i, restart = 0;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/*
	 * If either engine is running, stop both and poll the ISR
	 * until TX reports idle and RX reports stopped/wait (or we
	 * time out).
	 */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		/*
		 * On timeout, complain — except for chips known not to
		 * report idle correctly (ASIX TX, broken-RX-state chips).
		 */
		if (i == DC_TIMEOUT) {
			if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc))
				printf("%s: failed to force tx to idle state\n",
				    sc->sc_dev.dv_xname);
			if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    !DC_HAS_BROKEN_RXSTATE(sc))
				printf("%s: failed to force rx to idle state\n",
				    sc->sc_dev.dv_xname);
		}
	}

	/* 100baseTX: clear the 10Mbps speed-select, enable heartbeat. */
	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
			/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			/* MII mode: internal PCS/scrambler off, MII port on. */
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* Symbol/SYM mode: use the internal PCS + scrambler. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX|IFM_FDX : IFM_100_TX);
		}
	}

	/* 10baseT: set speed-select, disable heartbeat. */
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
			/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/*
				 * 21143 SIA: reset the SIA, force 10baseT
				 * with the appropriate duplex via CSR14
				 * magic values, then unreset and disable
				 * autoneg.  The DELAY gives the SIA time
				 * to settle.
				 */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T|IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	/* Full/half duplex bit, plus the PNIC's own NWAY duplex bit. */
	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	/* Restart the engines if we stopped them above. */
	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);
}
1361 
/*
 * Issue a software reset and bring the chip back to a known state:
 * interrupts masked, bus control and netconfig cleared, SIA unreset
 * on 21143, and 10baseT preselected on the 21145.
 */
void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	/* Poll for the self-clearing reset bit to drop. */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * These chips don't clear the reset bit by themselves: clear
	 * it manually, and zero i so the timeout warning below is
	 * suppressed for them.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
	    DC_IS_INTEL(sc) || DC_IS_CONEXANT(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	if (sc->dc_type == DC_TYPE_21145)
		dc_setcfg(sc, IFM_10_T);
}
1407 
1408 void
1409 dc_apply_fixup(struct dc_softc *sc, int media)
1410 {
1411 	struct dc_mediainfo *m;
1412 	u_int8_t *p;
1413 	int i;
1414 	u_int32_t reg;
1415 
1416 	m = sc->dc_mi;
1417 
1418 	while (m != NULL) {
1419 		if (m->dc_media == media)
1420 			break;
1421 		m = m->dc_next;
1422 	}
1423 
1424 	if (m == NULL)
1425 		return;
1426 
1427 	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
1428 		reg = (p[0] | (p[1] << 8)) << 16;
1429 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1430 	}
1431 
1432 	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
1433 		reg = (p[0] | (p[1] << 8)) << 16;
1434 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1435 	}
1436 }
1437 
1438 void
1439 dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
1440 {
1441 	struct dc_mediainfo *m;
1442 
1443 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1444 	if (m == NULL)
1445 		return;
1446 	switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
1447 	case DC_SIA_CODE_10BT:
1448 		m->dc_media = IFM_10_T;
1449 		break;
1450 	case DC_SIA_CODE_10BT_FDX:
1451 		m->dc_media = IFM_10_T|IFM_FDX;
1452 		break;
1453 	case DC_SIA_CODE_10B2:
1454 		m->dc_media = IFM_10_2;
1455 		break;
1456 	case DC_SIA_CODE_10B5:
1457 		m->dc_media = IFM_10_5;
1458 		break;
1459 	default:
1460 		break;
1461 	}
1462 
1463 	/*
1464 	 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
1465 	 * Things apparently already work for cards that do
1466 	 * supply Media Specific Data.
1467 	 */
1468 	if (l->dc_sia_code & DC_SIA_CODE_EXT) {
1469 		m->dc_gp_len = 2;
1470 		m->dc_gp_ptr =
1471 		(u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
1472 	} else {
1473 		m->dc_gp_len = 2;
1474 		m->dc_gp_ptr =
1475 		(u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
1476 	}
1477 
1478 	m->dc_next = sc->dc_mi;
1479 	sc->dc_mi = m;
1480 
1481 	sc->dc_pmode = DC_PMODE_SIA;
1482 }
1483 
1484 void
1485 dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
1486 {
1487 	struct dc_mediainfo *m;
1488 
1489 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1490 	if (m == NULL)
1491 		return;
1492 	if (l->dc_sym_code == DC_SYM_CODE_100BT)
1493 		m->dc_media = IFM_100_TX;
1494 
1495 	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
1496 		m->dc_media = IFM_100_TX|IFM_FDX;
1497 
1498 	m->dc_gp_len = 2;
1499 	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;
1500 
1501 	m->dc_next = sc->dc_mi;
1502 	sc->dc_mi = m;
1503 
1504 	sc->dc_pmode = DC_PMODE_SYM;
1505 }
1506 
1507 void
1508 dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
1509 {
1510 	u_int8_t *p;
1511 	struct dc_mediainfo *m;
1512 
1513 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1514 	if (m == NULL)
1515 		return;
1516 	/* We abuse IFM_AUTO to represent MII. */
1517 	m->dc_media = IFM_AUTO;
1518 	m->dc_gp_len = l->dc_gpr_len;
1519 
1520 	p = (u_int8_t *)l;
1521 	p += sizeof(struct dc_eblock_mii);
1522 	m->dc_gp_ptr = p;
1523 	p += 2 * l->dc_gpr_len;
1524 	m->dc_reset_len = *p;
1525 	p++;
1526 	m->dc_reset_ptr = p;
1527 
1528 	m->dc_next = sc->dc_mi;
1529 	sc->dc_mi = m;
1530 }
1531 
1532 void
1533 dc_read_srom(struct dc_softc *sc, int bits)
1534 {
1535 	int size;
1536 
1537 	size = 2 << bits;
1538 	sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT);
1539 	if (sc->dc_srom == NULL)
1540 		return;
1541 	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
1542 }
1543 
/*
 * Parse the 21143 SROM media leaf.  The leaf offset is stored at
 * byte 27 of the SROM.  Two passes are made over the media blocks:
 * the first only detects whether an MII block exists, the second
 * decodes blocks — SIA/SYM blocks are ignored when an MII block is
 * present, since MII takes precedence.
 */
void
dc_parse_21143_srom(struct dc_softc *sc)
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int have_mii, i, loff;
	char *ptr;

	have_mii = 0;
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	/* Media blocks start right after the leaf header. */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * Look if we got a MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
		    have_mii++;

		/* Advance past this block: length byte (low 7 bits) + header. */
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again. Only use SIA and SYM media
	 * blocks if no MII media block is available.
	 */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch(hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			if (! have_mii)
			    dc_decode_leaf_sia(sc,
				(struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			if (! have_mii)
			    dc_decode_leaf_sym(sc,
				(struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}
}
1600 
1601 /*
1602  * Attach the interface. Allocate softc structures, do ifmedia
1603  * setup and ethernet/BPF attach.
1604  */
1605 void
1606 dc_attach(struct dc_softc *sc)
1607 {
1608 	struct ifnet *ifp;
1609 	int mac_offset, tmp, i;
1610 	u_int32_t reg;
1611 
1612 	/*
1613 	 * Get station address from the EEPROM.
1614 	 */
1615 	if (sc->sc_hasmac)
1616 		goto hasmac;
1617 
1618 	switch(sc->dc_type) {
1619 	case DC_TYPE_98713:
1620 	case DC_TYPE_98713A:
1621 	case DC_TYPE_987x5:
1622 	case DC_TYPE_PNICII:
1623 		dc_read_eeprom(sc, (caddr_t)&mac_offset,
1624 		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
1625 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1626 		    (mac_offset / 2), 3, 0);
1627 		break;
1628 	case DC_TYPE_PNIC:
1629 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 0, 3, 1);
1630 		break;
1631 	case DC_TYPE_DM9102:
1632 	case DC_TYPE_21143:
1633 	case DC_TYPE_21145:
1634 	case DC_TYPE_ASIX:
1635 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1636 		    DC_EE_NODEADDR, 3, 0);
1637 		break;
1638 	case DC_TYPE_AL981:
1639 	case DC_TYPE_AN983:
1640 		reg = CSR_READ_4(sc, DC_AL_PAR0);
1641 		sc->sc_arpcom.ac_enaddr[0] = (reg & 0xff);
1642 		sc->sc_arpcom.ac_enaddr[1] = (reg >> 8) & 0xff;
1643 		sc->sc_arpcom.ac_enaddr[2] = (reg >> 16) & 0xff;
1644 		sc->sc_arpcom.ac_enaddr[3] = (reg >> 24) & 0xff;
1645 		reg = CSR_READ_4(sc, DC_AL_PAR1);
1646 		sc->sc_arpcom.ac_enaddr[4] = (reg & 0xff);
1647 		sc->sc_arpcom.ac_enaddr[5] = (reg >> 8) & 0xff;
1648 		break;
1649 	case DC_TYPE_CONEXANT:
1650 		bcopy(&sc->dc_srom + DC_CONEXANT_EE_NODEADDR,
1651 		    &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
1652 		break;
1653 	case DC_TYPE_XIRCOM:
1654 		break;
1655 	default:
1656 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1657 		    DC_EE_NODEADDR, 3, 0);
1658 		break;
1659 	}
1660 hasmac:
1661 
1662 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct dc_list_data),
1663 	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1664 	    BUS_DMA_NOWAIT) != 0) {
1665 		printf(": can't alloc list mem\n");
1666 		goto fail;
1667 	}
1668 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1669 	    sizeof(struct dc_list_data), &sc->sc_listkva,
1670 	    BUS_DMA_NOWAIT) != 0) {
1671 		printf(": can't map list mem\n");
1672 		goto fail;
1673 	}
1674 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct dc_list_data), 1,
1675 	    sizeof(struct dc_list_data), 0, BUS_DMA_NOWAIT,
1676 	    &sc->sc_listmap) != 0) {
1677 		printf(": can't alloc list map\n");
1678 		goto fail;
1679 	}
1680 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1681 	    sizeof(struct dc_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1682 		printf(": can't load list map\n");
1683 		goto fail;
1684 	}
1685 	sc->dc_ldata = (struct dc_list_data *)sc->sc_listkva;
1686 	bzero(sc->dc_ldata, sizeof(struct dc_list_data));
1687 
1688 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1689 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
1690 		    0, BUS_DMA_NOWAIT,
1691 		    &sc->dc_cdata.dc_rx_chain[i].sd_map) != 0) {
1692 			printf(": can't create rx map\n");
1693 			return;
1694 		}
1695 	}
1696 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1697 	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
1698 		printf(": can't create rx spare map\n");
1699 		return;
1700 	}
1701 
1702 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1703 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1704 		    DC_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
1705 		    &sc->dc_cdata.dc_tx_chain[i].sd_map) != 0) {
1706 			printf(": can't create tx map\n");
1707 			return;
1708 		}
1709 	}
1710 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, DC_TX_LIST_CNT - 5,
1711 	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1712 		printf(": can't create tx spare map\n");
1713 		return;
1714 	}
1715 
1716 	/*
1717 	 * A 21143 or clone chip was detected. Inform the world.
1718 	 */
1719 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
1720 
1721 	ifp = &sc->sc_arpcom.ac_if;
1722 	ifp->if_softc = sc;
1723 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1724 	ifp->if_ioctl = dc_ioctl;
1725 	ifp->if_start = dc_start;
1726 	ifp->if_watchdog = dc_watchdog;
1727 	ifp->if_baudrate = 10000000;
1728 	IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1);
1729 	IFQ_SET_READY(&ifp->if_snd);
1730 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1731 
1732 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1733 
1734 	/* Do MII setup. If this is a 21143, check for a PHY on the
1735 	 * MII bus after applying any necessary fixups to twiddle the
1736 	 * GPIO bits. If we don't end up finding a PHY, restore the
1737 	 * old selection (SIA only or SIA/SYM) and attach the dcphy
1738 	 * driver instead.
1739 	 */
1740 	if (DC_IS_INTEL(sc)) {
1741 		dc_apply_fixup(sc, IFM_AUTO);
1742 		tmp = sc->dc_pmode;
1743 		sc->dc_pmode = DC_PMODE_MII;
1744 	}
1745 
1746 	/*
1747 	 * Setup General Purpose port mode and data so the tulip can talk
1748 	 * to the MII.  This needs to be done before mii_attach so that
1749 	 * we can actually see them.
1750 	 */
1751 	if (DC_IS_XIRCOM(sc)) {
1752 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
1753 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1754 		DELAY(10);
1755 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
1756 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1757 		DELAY(10);
1758 	}
1759 
1760 	sc->sc_mii.mii_ifp = ifp;
1761 	sc->sc_mii.mii_readreg = dc_miibus_readreg;
1762 	sc->sc_mii.mii_writereg = dc_miibus_writereg;
1763 	sc->sc_mii.mii_statchg = dc_miibus_statchg;
1764 	ifmedia_init(&sc->sc_mii.mii_media, 0, dc_ifmedia_upd, dc_ifmedia_sts);
1765 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1766 	    MII_OFFSET_ANY, 0);
1767 
1768 	if (DC_IS_INTEL(sc)) {
1769 		if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1770 			sc->dc_pmode = tmp;
1771 			if (sc->dc_pmode != DC_PMODE_SIA)
1772 				sc->dc_pmode = DC_PMODE_SYM;
1773 			sc->dc_flags |= DC_21143_NWAY;
1774 			if (sc->dc_flags & DC_MOMENCO_BOTCH)
1775 				sc->dc_pmode = DC_PMODE_MII;
1776 			mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff,
1777 			    MII_PHY_ANY, MII_OFFSET_ANY, 0);
1778 		} else {
1779 			/* we have a PHY, so we must clear this bit */
1780 			sc->dc_flags &= ~DC_TULIP_LEDS;
1781 		}
1782 	}
1783 
1784 	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1785 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1786 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1787 		printf("%s: MII without any PHY!\n", sc->sc_dev.dv_xname);
1788 	} else if (sc->dc_type == DC_TYPE_21145) {
1789 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
1790 	} else
1791 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1792 
1793 	if (DC_IS_DAVICOM(sc) && sc->dc_revision >= DC_REVISION_DM9102A)
1794 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_HPNA_1,0,NULL);
1795 
1796 	if (DC_IS_ADMTEK(sc)) {
1797 		/*
1798 		 * Set automatic TX underrun recovery for the ADMtek chips
1799 		 */
1800 		DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
1801 	}
1802 
1803 	/*
1804 	 * Call MI attach routines.
1805 	 */
1806 	if_attach(ifp);
1807 	ether_ifattach(ifp);
1808 
1809 	sc->sc_dhook = shutdownhook_establish(dc_shutdown, sc);
1810 	sc->sc_pwrhook = powerhook_establish(dc_power, sc);
1811 
1812 fail:
1813 	return;
1814 }
1815 
1816 /*
1817  * Initialize the transmit descriptors.
1818  */
1819 int
1820 dc_list_tx_init(struct dc_softc *sc)
1821 {
1822 	struct dc_chain_data *cd;
1823 	struct dc_list_data *ld;
1824 	int i;
1825 	bus_addr_t next;
1826 
1827 	cd = &sc->dc_cdata;
1828 	ld = sc->dc_ldata;
1829 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1830 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1831 		if (i == (DC_TX_LIST_CNT - 1))
1832 			next +=
1833 			    offsetof(struct dc_list_data, dc_tx_list[0]);
1834 		else
1835 			next +=
1836 			    offsetof(struct dc_list_data, dc_tx_list[i + 1]);
1837 		cd->dc_tx_chain[i].sd_mbuf = NULL;
1838 		ld->dc_tx_list[i].dc_data = htole32(0);
1839 		ld->dc_tx_list[i].dc_ctl = htole32(0);
1840 		ld->dc_tx_list[i].dc_next = htole32(next);
1841 	}
1842 
1843 	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
1844 
1845 	return (0);
1846 }
1847 
1848 
1849 /*
1850  * Initialize the RX descriptors and allocate mbufs for them. Note that
1851  * we arrange the descriptors in a closed ring, so that the last descriptor
1852  * points back to the first.
1853  */
1854 int
1855 dc_list_rx_init(struct dc_softc *sc)
1856 {
1857 	struct dc_chain_data *cd;
1858 	struct dc_list_data *ld;
1859 	int i;
1860 	bus_addr_t next;
1861 
1862 	cd = &sc->dc_cdata;
1863 	ld = sc->dc_ldata;
1864 
1865 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1866 		if (dc_newbuf(sc, i, NULL) == ENOBUFS)
1867 			return (ENOBUFS);
1868 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1869 		if (i == (DC_RX_LIST_CNT - 1))
1870 			next +=
1871 			    offsetof(struct dc_list_data, dc_rx_list[0]);
1872 		else
1873 			next +=
1874 			    offsetof(struct dc_list_data, dc_rx_list[i + 1]);
1875 		ld->dc_rx_list[i].dc_next = htole32(next);
1876 	}
1877 
1878 	cd->dc_rx_prod = 0;
1879 
1880 	return (0);
1881 }
1882 
1883 /*
1884  * Initialize an RX descriptor and attach an MBUF cluster.
1885  */
/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 *
 * If m is NULL a fresh cluster is allocated, loaded into the spare
 * DMA map, and the spare is swapped with the slot's map (so a load
 * failure never leaves the slot without a valid mapping).  If m is
 * non-NULL the caller is recycling an mbuf already mapped for this
 * slot, and no DMA load is performed.  Returns 0 or ENOBUFS.
 */
int
dc_newbuf(struct dc_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct dc_desc *c;
	bus_dmamap_t map;

	c = &sc->dc_ldata->dc_rx_list[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		/* Load into the spare map; on failure the slot's map is untouched. */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_sparemap,
		    m_new, BUS_DMA_NOWAIT) != 0) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		/* Swap the freshly-loaded spare map into this slot. */
		map = sc->dc_cdata.dc_rx_chain[i].sd_map;
		sc->dc_cdata.dc_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Skip 8 bytes at the front; dc_data below points past them too. */
	m_adj(m_new, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero((char *)mtod(m_new, char *), m_new->m_len);

	bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 0,
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Fill in the descriptor and hand it to the chip (OWN last). */
	sc->dc_cdata.dc_rx_chain[i].sd_mbuf = m_new;
	c->dc_data = htole32(
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->dc_ctl = htole32(DC_RXCTL_RLINK | ETHER_MAX_DIX_LEN);
	c->dc_status = htole32(DC_RXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_rx_list[i]),
	    sizeof(struct dc_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1953 
1954 /*
1955  * Grrrrr.
1956  * The PNIC chip has a terrible bug in it that manifests itself during
1957  * periods of heavy activity. The exact mode of failure if difficult to
1958  * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
1959  * will happen on slow machines. The bug is that sometimes instead of
1960  * uploading one complete frame during reception, it uploads what looks
1961  * like the entire contents of its FIFO memory. The frame we want is at
1962  * the end of the whole mess, but we never know exactly how much data has
1963  * been uploaded, so salvaging the frame is hard.
1964  *
1965  * There is only one way to do it reliably, and it's disgusting.
1966  * Here's what we know:
1967  *
1968  * - We know there will always be somewhere between one and three extra
1969  *   descriptors uploaded.
1970  *
1971  * - We know the desired received frame will always be at the end of the
1972  *   total data upload.
1973  *
1974  * - We know the size of the desired received frame because it will be
1975  *   provided in the length field of the status word in the last descriptor.
1976  *
1977  * Here's what we do:
1978  *
1979  * - When we allocate buffers for the receive ring, we bzero() them.
1980  *   This means that we know that the buffer contents should be all
1981  *   zeros, except for data uploaded by the chip.
1982  *
1983  * - We also force the PNIC chip to upload frames that include the
1984  *   ethernet CRC at the end.
1985  *
1986  * - We gather all of the bogus frame data into a single buffer.
1987  *
1988  * - We then position a pointer at the end of this buffer and scan
1989  *   backwards until we encounter the first non-zero byte of data.
1990  *   This is the end of the received frame. We know we will encounter
1991  *   some data at the end of the frame because the CRC will always be
1992  *   there, so even if the sender transmits a packet of all zeros,
1993  *   we won't be fooled.
1994  *
1995  * - We know the size of the actual received frame, so we subtract
1996  *   that value from the current pointer location. This brings us
1997  *   to the start of the actual received packet.
1998  *
1999  * - We copy this into an mbuf and pass it on, along with the actual
2000  *   frame length.
2001  *
2002  * The performance hit is tremendous, but it beats dropping frames all
2003  * the time.
2004  */
2005 
2006 #define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG)
/*
 * Salvage a frame mangled by the PNIC FIFO-dump bug (see the big
 * comment above): gather all fragments into dc_pnic_rx_buf, locate
 * the real frame at the end by scanning backwards for non-zero
 * data, and copy it back into the final descriptor's mbuf with a
 * faked-up "whole frame" status word.
 */
void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
	struct dc_desc		*cur_rx;
	struct dc_desc		*c = NULL;
	struct mbuf		*m = NULL;
	unsigned char		*ptr;
	int			i, total_len;
	u_int32_t		rxstat = 0;

	/* Start at the descriptor where FIRSTFRAG was seen. */
	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	bzero(ptr, ETHER_MAX_DIX_LEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		bcopy(mtod(m, char *), ptr, ETHER_MAX_DIX_LEN);
		ptr += ETHER_MAX_DIX_LEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		/* Recycle the intermediate mbuf back into the ring. */
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/*
	 * Scan backwards until we hit a non-zero byte — the CRC
	 * guarantees one exists at the end of the real frame.
	 * NOTE(review): an all-zero buffer would walk off the front;
	 * presumably prevented by the CRC always being present.
	 */
	while(*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((unsigned long)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
 	 * frame reception.
	 */
	dc_newbuf(sc, i, m);
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}
2061 
2062 /*
2063  * This routine searches the RX ring for dirty descriptors in the
2064  * event that the rxeof routine falls out of sync with the chip's
2065  * current descriptor pointer. This may happen sometimes as a result
2066  * of a "no RX buffer available" condition that happens when the chip
2067  * consumes all of the RX buffers before the driver has a chance to
2068  * process the RX ring. This routine may need to be called more than
2069  * once to bring the driver back in sync with the chip, however we
2070  * should still be getting RX DONE interrupts to drive the search
2071  * for new packets in the RX ring, so we should catch up eventually.
2072  */
2073 int
2074 dc_rx_resync(struct dc_softc *sc)
2075 {
2076 	u_int32_t stat;
2077 	int i, pos, offset;
2078 
2079 	pos = sc->dc_cdata.dc_rx_prod;
2080 
2081 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
2082 
2083 		offset = offsetof(struct dc_list_data, dc_rx_list[pos]);
2084 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2085 		    offset, sizeof(struct dc_desc),
2086 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2087 
2088 		stat = sc->dc_ldata->dc_rx_list[pos].dc_status;
2089 		if (!(stat & htole32(DC_RXSTAT_OWN)))
2090 			break;
2091 		DC_INC(pos, DC_RX_LIST_CNT);
2092 	}
2093 
2094 	/* If the ring really is empty, then just return. */
2095 	if (i == DC_RX_LIST_CNT)
2096 		return (0);
2097 
2098 	/* We've fallen behind the chip: catch it. */
2099 	sc->dc_cdata.dc_rx_prod = pos;
2100 
2101 	return (EAGAIN);
2102 }
2103 
2104 /*
2105  * A frame has been uploaded: pass the resulting mbuf chain up to
2106  * the higher level protocols.
2107  */
2108 void
2109 dc_rxeof(struct dc_softc *sc)
2110 {
2111 	struct mbuf *m;
2112 	struct ifnet *ifp;
2113 	struct dc_desc *cur_rx;
2114 	int i, offset, total_len = 0;
2115 	u_int32_t rxstat;
2116 
2117 	ifp = &sc->sc_arpcom.ac_if;
2118 	i = sc->dc_cdata.dc_rx_prod;
2119 
2120 	for(;;) {
2121 		struct mbuf	*m0 = NULL;
2122 
2123 		offset = offsetof(struct dc_list_data, dc_rx_list[i]);
2124 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2125 		    offset, sizeof(struct dc_desc),
2126 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2127 
2128 		cur_rx = &sc->dc_ldata->dc_rx_list[i];
2129 		rxstat = letoh32(cur_rx->dc_status);
2130 		if (rxstat & DC_RXSTAT_OWN)
2131 			break;
2132 
2133 		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
2134 		total_len = DC_RXBYTES(rxstat);
2135 
2136 		bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map,
2137 		    0, sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
2138 		    BUS_DMASYNC_POSTREAD);
2139 
2140 		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
2141 			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
2142 				if (rxstat & DC_RXSTAT_FIRSTFRAG)
2143 					sc->dc_pnic_rx_bug_save = i;
2144 				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
2145 					DC_INC(i, DC_RX_LIST_CNT);
2146 					continue;
2147 				}
2148 				dc_pnic_rx_bug_war(sc, i);
2149 				rxstat = letoh32(cur_rx->dc_status);
2150 				total_len = DC_RXBYTES(rxstat);
2151 			}
2152 		}
2153 
2154 		sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
2155 
2156 		/*
2157 		 * If an error occurs, update stats, clear the
2158 		 * status word and leave the mbuf cluster in place:
2159 		 * it should simply get re-used next time this descriptor
2160 		 * comes up in the ring.  However, don't report long
2161 		 * frames as errors since they could be VLANs.
2162 		 */
2163 		if ((rxstat & DC_RXSTAT_RXERR)) {
2164 			if (!(rxstat & DC_RXSTAT_GIANT) ||
2165 			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
2166 				       DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
2167 				       DC_RXSTAT_RUNT   | DC_RXSTAT_DE))) {
2168 				ifp->if_ierrors++;
2169 				if (rxstat & DC_RXSTAT_COLLSEEN)
2170 					ifp->if_collisions++;
2171 				dc_newbuf(sc, i, m);
2172 				if (rxstat & DC_RXSTAT_CRCERR) {
2173 					DC_INC(i, DC_RX_LIST_CNT);
2174 					continue;
2175 				} else {
2176 					dc_init(sc);
2177 					return;
2178 				}
2179 			}
2180 		}
2181 
2182 		/* No errors; receive the packet. */
2183 		total_len -= ETHER_CRC_LEN;
2184 
2185 		m->m_pkthdr.rcvif = ifp;
2186 		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
2187 		    ifp, NULL);
2188 		dc_newbuf(sc, i, m);
2189 		DC_INC(i, DC_RX_LIST_CNT);
2190 		if (m0 == NULL) {
2191 			ifp->if_ierrors++;
2192 			continue;
2193 		}
2194 		m = m0;
2195 
2196 		ifp->if_ipackets++;
2197 #if NBPFILTER > 0
2198 		if (ifp->if_bpf)
2199 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
2200 #endif
2201 		ether_input_mbuf(ifp, m);
2202 	}
2203 
2204 	sc->dc_cdata.dc_rx_prod = i;
2205 }
2206 
2207 /*
2208  * A frame was downloaded to the chip. It's safe for us to clean up
2209  * the list buffers.
2210  */
2211 
2212 void
2213 dc_txeof(struct dc_softc *sc)
2214 {
2215 	struct dc_desc *cur_tx = NULL;
2216 	struct ifnet *ifp;
2217 	int idx, offset;
2218 
2219 	ifp = &sc->sc_arpcom.ac_if;
2220 
2221 	/*
2222 	 * Go through our tx list and free mbufs for those
2223 	 * frames that have been transmitted.
2224 	 */
2225 	idx = sc->dc_cdata.dc_tx_cons;
2226 	while(idx != sc->dc_cdata.dc_tx_prod) {
2227 		u_int32_t		txstat;
2228 
2229 		offset = offsetof(struct dc_list_data, dc_tx_list[idx]);
2230 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2231 		    offset, sizeof(struct dc_desc),
2232 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2233 
2234 		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
2235 		txstat = letoh32(cur_tx->dc_status);
2236 
2237 		if (txstat & DC_TXSTAT_OWN)
2238 			break;
2239 
2240 		if (!(cur_tx->dc_ctl & htole32(DC_TXCTL_LASTFRAG)) ||
2241 		    cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
2242 			if (cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
2243 				/*
2244 				 * Yes, the PNIC is so brain damaged
2245 				 * that it will sometimes generate a TX
2246 				 * underrun error while DMAing the RX
2247 				 * filter setup frame. If we detect this,
2248 				 * we have to send the setup frame again,
2249 				 * or else the filter won't be programmed
2250 				 * correctly.
2251 				 */
2252 				if (DC_IS_PNIC(sc)) {
2253 					if (txstat & DC_TXSTAT_ERRSUM)
2254 						dc_setfilt(sc);
2255 				}
2256 				sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
2257 			}
2258 			sc->dc_cdata.dc_tx_cnt--;
2259 			DC_INC(idx, DC_TX_LIST_CNT);
2260 			continue;
2261 		}
2262 
2263 		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
2264 			/*
2265 			 * XXX: Why does my Xircom taunt me so?
2266 			 * For some reason it likes setting the CARRLOST flag
2267 			 * even when the carrier is there. wtf?!
2268 			 * Who knows, but Conexant chips have the
2269 			 * same problem. Maybe they took lessons
2270 			 * from Xircom.
2271 			 */
2272 			if (/*sc->dc_type == DC_TYPE_21143 &&*/
2273 			    sc->dc_pmode == DC_PMODE_MII &&
2274 			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
2275 			    DC_TXSTAT_NOCARRIER)))
2276 				txstat &= ~DC_TXSTAT_ERRSUM;
2277 		} else {
2278 			if (/*sc->dc_type == DC_TYPE_21143 &&*/
2279 			    sc->dc_pmode == DC_PMODE_MII &&
2280 		    	    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
2281 		    	    DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST)))
2282 				txstat &= ~DC_TXSTAT_ERRSUM;
2283 		}
2284 
2285 		if (txstat & DC_TXSTAT_ERRSUM) {
2286 			ifp->if_oerrors++;
2287 			if (txstat & DC_TXSTAT_EXCESSCOLL)
2288 				ifp->if_collisions++;
2289 			if (txstat & DC_TXSTAT_LATECOLL)
2290 				ifp->if_collisions++;
2291 			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
2292 				dc_init(sc);
2293 				return;
2294 			}
2295 		}
2296 
2297 		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;
2298 
2299 		ifp->if_opackets++;
2300 		if (sc->dc_cdata.dc_tx_chain[idx].sd_map->dm_nsegs != 0) {
2301 			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map;
2302 
2303 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2304 			    BUS_DMASYNC_POSTWRITE);
2305 			bus_dmamap_unload(sc->sc_dmat, map);
2306 		}
2307 		if (sc->dc_cdata.dc_tx_chain[idx].sd_mbuf != NULL) {
2308 			m_freem(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf);
2309 			sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
2310 		}
2311 
2312 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2313 		    offset, sizeof(struct dc_desc),
2314 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2315 
2316 		sc->dc_cdata.dc_tx_cnt--;
2317 		DC_INC(idx, DC_TX_LIST_CNT);
2318 	}
2319 	sc->dc_cdata.dc_tx_cons = idx;
2320 
2321 	if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt > 5)
2322 		ifp->if_flags &= ~IFF_OACTIVE;
2323 	if (sc->dc_cdata.dc_tx_cnt == 0)
2324 		ifp->if_timer = 0;
2325 }
2326 
/*
 * Periodic timer: poll the PHY/link state and restart a stalled
 * transmit queue once the link comes up.  Reschedules itself.
 */
void
dc_tick(void *xsc)
{
	struct dc_softc *sc = (struct dc_softc *)xsc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int s;
	u_int32_t r;

	s = splnet();

	ifp = &sc->sc_arpcom.ac_if;
	mii = &sc->sc_mii;

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			/*
			 * 21143 NWAY: read the link status bits
			 * directly and force a media change if the
			 * active media no longer matches reality.
			 */
			r = CSR_READ_4(sc, DC_10BTSTAT);
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			/*
			 * For NICs which never report DC_RXSTATE_WAIT, we
			 * have to bite the bullet...
			 */
			if ((DC_HAS_BROKEN_RXSTATE(sc) || (CSR_READ_4(sc,
			    DC_ISR) & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    sc->dc_cdata.dc_tx_cnt == 0 && !DC_IS_ASIX(sc)) {
				mii_tick(mii);
				if (!(mii->mii_media_status & IFM_ACTIVE))
					sc->dc_link = 0;
			}
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running. However, even though the MAC may have been initialized,
	 * there may be a delay of a few seconds before the PHY completes
	 * autonegotiation and the link is brought up. Any transmissions
	 * made during that delay will be lost. Dealing with this is tricky:
	 * we can't just pause in the init routine while waiting for the
	 * PHY to come ready since that would bring the whole system to
	 * a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->dc_link++;
		if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
	 	    dc_start(ifp);
	}

	/* Poll faster (10 Hz) while 21143 NWAY negotiation is pending. */
	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		timeout_add(&sc->dc_tick_tmo, hz / 10);
	else
		timeout_add_sec(&sc->dc_tick_tmo, 1);

	splx(s);
}
2405 
2406 /* A transmit underrun has occurred.  Back off the transmit threshold,
2407  * or switch to store and forward mode if we have to.
2408  */
2409 void
2410 dc_tx_underrun(struct dc_softc *sc)
2411 {
2412 	u_int32_t	isr;
2413 	int		i;
2414 
2415 	if (DC_IS_DAVICOM(sc))
2416 		dc_init(sc);
2417 
2418 	if (DC_IS_INTEL(sc)) {
2419 		/*
2420 		 * The real 21143 requires that the transmitter be idle
2421 		 * in order to change the transmit threshold or store
2422 		 * and forward state.
2423 		 */
2424 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2425 
2426 		for (i = 0; i < DC_TIMEOUT; i++) {
2427 			isr = CSR_READ_4(sc, DC_ISR);
2428 			if (isr & DC_ISR_TX_IDLE)
2429 				break;
2430 			DELAY(10);
2431 		}
2432 		if (i == DC_TIMEOUT) {
2433 			printf("%s: failed to force tx to idle state\n",
2434 			    sc->sc_dev.dv_xname);
2435 			dc_init(sc);
2436 		}
2437 	}
2438 
2439 	sc->dc_txthresh += DC_TXTHRESH_INC;
2440 	if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
2441 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2442 	} else {
2443 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
2444 		DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
2445 	}
2446 
2447 	if (DC_IS_INTEL(sc))
2448 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2449 
2450 	return;
2451 }
2452 
/*
 * Interrupt handler: ack and service all pending interrupt causes,
 * then restart transmission if packets are queued.  Returns nonzero
 * if the interrupt was ours.
 */
int
dc_intr(void *arg)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;
	int claimed = 0;

	sc = arg;

	ifp = &sc->sc_arpcom.ac_if;

	/* Not our interrupt. */
	if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0)
		return (claimed);

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
			dc_stop(sc);
		return (claimed);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);

	/*
	 * Loop until no interrupt causes remain.  0xFFFFFFFF means
	 * the hardware has gone away (e.g. hot unplug).
	 */
	while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) &&
	    status != 0xFFFFFFFF &&
	    (ifp->if_flags & IFF_RUNNING)) {

		claimed = 1;
		/* Writing the status bits back acks them. */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & DC_ISR_RX_OK) {
			int		curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			/*
			 * RX interrupt but no packets consumed: we may
			 * be out of sync with the chip's descriptor
			 * pointer; resync and retry.
			 */
			if (curpkts == ifp->if_ipackets) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF))
			dc_txeof(sc);

		if (status & DC_ISR_TX_IDLE) {
			dc_txeof(sc);
			/* Descriptors still pending: kick the TX engine. */
			if (sc->dc_cdata.dc_tx_cnt) {
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		/* RX stall conditions: drain and resync as above. */
		if ((status & DC_ISR_RX_WATDOGTIMEO)
		    || (status & DC_ISR_RX_NOBUF)) {
			int		curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			if (curpkts == ifp->if_ipackets) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		/* Fatal bus error: full reset and reinit. */
		if (status & DC_ISR_BUS_ERR) {
			dc_reset(sc);
			dc_init(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		dc_start(ifp);

	return (claimed);
}
2534 
2535 /*
2536  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2537  * pointers to the fragment pointers.
2538  */
2539 int
2540 dc_encap(struct dc_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
2541 {
2542 	struct dc_desc *f = NULL;
2543 	int frag, cur, cnt = 0, i;
2544 	bus_dmamap_t map;
2545 
2546 	/*
2547  	 * Start packing the mbufs in this chain into
2548 	 * the fragment pointers. Stop when we run out
2549  	 * of fragments or hit the end of the mbuf chain.
2550 	 */
2551 	map = sc->sc_tx_sparemap;
2552 
2553 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
2554 	    m_head, BUS_DMA_NOWAIT) != 0)
2555 		return (ENOBUFS);
2556 
2557 	cur = frag = *txidx;
2558 
2559 	for (i = 0; i < map->dm_nsegs; i++) {
2560 		if (sc->dc_flags & DC_TX_ADMTEK_WAR) {
2561 			if (*txidx != sc->dc_cdata.dc_tx_prod &&
2562 			    frag == (DC_TX_LIST_CNT - 1)) {
2563 				bus_dmamap_unload(sc->sc_dmat, map);
2564 				return (ENOBUFS);
2565 			}
2566 		}
2567 		if ((DC_TX_LIST_CNT -
2568 		    (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) {
2569 			bus_dmamap_unload(sc->sc_dmat, map);
2570 			return (ENOBUFS);
2571 		}
2572 
2573 		f = &sc->dc_ldata->dc_tx_list[frag];
2574 		f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len);
2575 		if (cnt == 0) {
2576 			f->dc_status = htole32(0);
2577 			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
2578 		} else
2579 			f->dc_status = htole32(DC_TXSTAT_OWN);
2580 		f->dc_data = htole32(map->dm_segs[i].ds_addr);
2581 		cur = frag;
2582 		DC_INC(frag, DC_TX_LIST_CNT);
2583 		cnt++;
2584 	}
2585 
2586 	sc->dc_cdata.dc_tx_cnt += cnt;
2587 	sc->dc_cdata.dc_tx_chain[cur].sd_mbuf = m_head;
2588 	sc->sc_tx_sparemap = sc->dc_cdata.dc_tx_chain[cur].sd_map;
2589 	sc->dc_cdata.dc_tx_chain[cur].sd_map = map;
2590 	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
2591 	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
2592 		sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |=
2593 		    htole32(DC_TXCTL_FINT);
2594 	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
2595 		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
2596 		    htole32(DC_TXCTL_FINT);
2597 	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
2598 		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
2599 		    htole32(DC_TXCTL_FINT);
2600 	else if ((sc->dc_flags & DC_TX_USE_TX_INTR) &&
2601 		 TBR_IS_ENABLED(&sc->sc_arpcom.ac_if.if_snd))
2602 		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
2603 		    htole32(DC_TXCTL_FINT);
2604 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2605 	    BUS_DMASYNC_PREWRITE);
2606 
2607 	sc->dc_ldata->dc_tx_list[*txidx].dc_status = htole32(DC_TXSTAT_OWN);
2608 
2609 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2610 	    offsetof(struct dc_list_data, dc_tx_list[*txidx]),
2611 	    sizeof(struct dc_desc) * cnt,
2612 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2613 
2614 	*txidx = frag;
2615 
2616 	return (0);
2617 }
2618 
2619 /*
2620  * Coalesce an mbuf chain into a single mbuf cluster buffer.
2621  * Needed for some really badly behaved chips that just can't
2622  * do scatter/gather correctly.
2623  */
2624 int
2625 dc_coal(struct dc_softc *sc, struct mbuf **m_head)
2626 {
2627 	struct mbuf		*m_new, *m;
2628 
2629 	m = *m_head;
2630 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
2631 	if (m_new == NULL)
2632 		return (ENOBUFS);
2633 	if (m->m_pkthdr.len > MHLEN) {
2634 		MCLGET(m_new, M_DONTWAIT);
2635 		if (!(m_new->m_flags & M_EXT)) {
2636 			m_freem(m_new);
2637 			return (ENOBUFS);
2638 		}
2639 	}
2640 	m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t));
2641 	m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len;
2642 	m_freem(m);
2643 	*m_head = m_new;
2644 
2645 	return (0);
2646 }
2647 
2648 /*
2649  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2650  * to the mbuf data regions directly in the transmit lists. We also save a
2651  * copy of the pointers since the transmit list fragment pointers are
2652  * physical addresses.
2653  */
2654 
2655 void
2656 dc_start(struct ifnet *ifp)
2657 {
2658 	struct dc_softc *sc;
2659 	struct mbuf *m_head = NULL;
2660 	int idx;
2661 
2662 	sc = ifp->if_softc;
2663 
2664 	if (!sc->dc_link && ifp->if_snd.ifq_len < 10)
2665 		return;
2666 
2667 	if (ifp->if_flags & IFF_OACTIVE)
2668 		return;
2669 
2670 	idx = sc->dc_cdata.dc_tx_prod;
2671 
2672 	while(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf == NULL) {
2673 		IFQ_POLL(&ifp->if_snd, m_head);
2674 		if (m_head == NULL)
2675 			break;
2676 
2677 		if (sc->dc_flags & DC_TX_COALESCE &&
2678 		    (m_head->m_next != NULL ||
2679 			sc->dc_flags & DC_TX_ALIGN)) {
2680 			/* note: dc_coal breaks the poll-and-dequeue rule.
2681 			 * if dc_coal fails, we lose the packet.
2682 			 */
2683 			IFQ_DEQUEUE(&ifp->if_snd, m_head);
2684 			if (dc_coal(sc, &m_head)) {
2685 				ifp->if_flags |= IFF_OACTIVE;
2686 				break;
2687 			}
2688 		}
2689 
2690 		if (dc_encap(sc, m_head, &idx)) {
2691 			ifp->if_flags |= IFF_OACTIVE;
2692 			break;
2693 		}
2694 
2695 		/* now we are committed to transmit the packet */
2696 		if (sc->dc_flags & DC_TX_COALESCE) {
2697 			/* if mbuf is coalesced, it is already dequeued */
2698 		} else
2699 			IFQ_DEQUEUE(&ifp->if_snd, m_head);
2700 
2701 		/*
2702 		 * If there's a BPF listener, bounce a copy of this frame
2703 		 * to him.
2704 		 */
2705 #if NBPFILTER > 0
2706 		if (ifp->if_bpf)
2707 			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
2708 #endif
2709 		if (sc->dc_flags & DC_TX_ONE) {
2710 			ifp->if_flags |= IFF_OACTIVE;
2711 			break;
2712 		}
2713 	}
2714 	if (idx == sc->dc_cdata.dc_tx_prod)
2715 		return;
2716 
2717 	/* Transmit */
2718 	sc->dc_cdata.dc_tx_prod = idx;
2719 	if (!(sc->dc_flags & DC_TX_POLL))
2720 		CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
2721 
2722 	/*
2723 	 * Set a timeout in case the chip goes out to lunch.
2724 	 */
2725 	ifp->if_timer = 5;
2726 }
2727 
/*
 * Initialize the hardware: reset the chip, program the bus/cache
 * configuration, set up the RX/TX descriptor rings, program the
 * receive filter and bring the interface to the RUNNING state.
 * Called at splnet() transitions via the s = splnet() below.
 */
void
dc_init(void *xsc)
{
	struct dc_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	int s;

	s = splnet();

	mii = &sc->sc_mii;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc);
	dc_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
	 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}
	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	/* Select store-and-forward or a cut-through TX threshold. */
	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16. Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do. The 98713 has a magic
		 * number all its own; the rest all use a different one.
		 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	/* Xircom: pulse the GPIO pins to wake up the PHY. */
	if (DC_IS_XIRCOM(sc)) {
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list. */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
		dc_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Sync down both lists initialized.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_rx_list[0]));
	CSR_WRITE_4(sc, DC_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter. We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	mii_mediachg(mii);
	dc_setcfg(sc, sc->dc_if_media);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	/* Arm the link-state poller (see dc_tick()). */
	timeout_set(&sc->dc_tick_tmo, dc_tick, sc);

	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		if (sc->dc_flags & DC_21143_NWAY)
			timeout_add(&sc->dc_tick_tmo, hz / 10);
		else
			timeout_add_sec(&sc->dc_tick_tmo, 1);
	}

#ifdef SRM_MEDIA
	if(sc->dc_srm_media) {
		struct ifreq ifr;

		ifr.ifr_media = sc->dc_srm_media;
		ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
		sc->dc_srm_media = 0;
	}
#endif
}
2913 
2914 /*
2915  * Set media options.
2916  */
2917 int
2918 dc_ifmedia_upd(struct ifnet *ifp)
2919 {
2920 	struct dc_softc *sc;
2921 	struct mii_data *mii;
2922 	struct ifmedia *ifm;
2923 
2924 	sc = ifp->if_softc;
2925 	mii = &sc->sc_mii;
2926 	mii_mediachg(mii);
2927 
2928 	ifm = &mii->mii_media;
2929 
2930 	if (DC_IS_DAVICOM(sc) &&
2931 	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
2932 		dc_setcfg(sc, ifm->ifm_media);
2933 	else
2934 		sc->dc_link = 0;
2935 
2936 	return (0);
2937 }
2938 
2939 /*
2940  * Report current media status.
2941  */
2942 void
2943 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2944 {
2945 	struct dc_softc *sc;
2946 	struct mii_data *mii;
2947 	struct ifmedia *ifm;
2948 
2949 	sc = ifp->if_softc;
2950 	mii = &sc->sc_mii;
2951 	mii_pollstat(mii);
2952 	ifm = &mii->mii_media;
2953 	if (DC_IS_DAVICOM(sc)) {
2954 		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
2955 			ifmr->ifm_active = ifm->ifm_media;
2956 			ifmr->ifm_status = 0;
2957 			return;
2958 		}
2959 	}
2960 	ifmr->ifm_active = mii->mii_media_active;
2961 	ifmr->ifm_status = mii->mii_media_status;
2962 }
2963 
/*
 * Handle socket ioctls for the interface: address assignment,
 * up/down and promiscuous transitions, and media selection.
 * Runs at splnet().  Returns 0 or an errno value.
 */
int
dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct dc_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct ifaddr		*ifa = (struct ifaddr *)data;
	struct mii_data		*mii;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		/* Setting an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			dc_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Only a PROMISC flip on a running interface
			 * needs a filter reload; otherwise (re)init
			 * if not already running.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->dc_if_flags) &
			     IFF_PROMISC) {
				dc_setfilt(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING)) {
					sc->dc_txthresh = 0;
					dc_init(sc);
				}
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dc_stop(sc);
		}
		/* Remember the flags so the next flip can be detected. */
		sc->dc_if_flags = ifp->if_flags;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc->sc_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
#ifdef SRM_MEDIA
		if (sc->dc_srm_media)
			sc->dc_srm_media = 0;
#endif
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	/* ENETRESET means multicast lists changed: reload the filter. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			dc_setfilt(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
3025 
3026 void
3027 dc_watchdog(struct ifnet *ifp)
3028 {
3029 	struct dc_softc *sc;
3030 
3031 	sc = ifp->if_softc;
3032 
3033 	ifp->if_oerrors++;
3034 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
3035 
3036 	dc_stop(sc);
3037 	dc_reset(sc);
3038 	dc_init(sc);
3039 
3040 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
3041 		dc_start(ifp);
3042 }
3043 
3044 /*
3045  * Stop the adapter and free any mbufs allocated to the
3046  * RX and TX lists.
3047  */
3048 void
3049 dc_stop(struct dc_softc *sc)
3050 {
3051 	struct ifnet *ifp;
3052 	int i;
3053 
3054 	ifp = &sc->sc_arpcom.ac_if;
3055 	ifp->if_timer = 0;
3056 
3057 	timeout_del(&sc->dc_tick_tmo);
3058 
3059 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3060 
3061 	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON));
3062 	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
3063 	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
3064 	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
3065 	sc->dc_link = 0;
3066 
3067 	/*
3068 	 * Free data in the RX lists.
3069 	 */
3070 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
3071 		if (sc->dc_cdata.dc_rx_chain[i].sd_map->dm_nsegs != 0) {
3072 			bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map;
3073 
3074 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3075 			    BUS_DMASYNC_POSTREAD);
3076 			bus_dmamap_unload(sc->sc_dmat, map);
3077 		}
3078 		if (sc->dc_cdata.dc_rx_chain[i].sd_mbuf != NULL) {
3079 			m_freem(sc->dc_cdata.dc_rx_chain[i].sd_mbuf);
3080 			sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
3081 		}
3082 	}
3083 	bzero((char *)&sc->dc_ldata->dc_rx_list,
3084 		sizeof(sc->dc_ldata->dc_rx_list));
3085 
3086 	/*
3087 	 * Free the TX list buffers.
3088 	 */
3089 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
3090 		if (sc->dc_cdata.dc_tx_chain[i].sd_map->dm_nsegs != 0) {
3091 			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map;
3092 
3093 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3094 			    BUS_DMASYNC_POSTWRITE);
3095 			bus_dmamap_unload(sc->sc_dmat, map);
3096 		}
3097 		if (sc->dc_cdata.dc_tx_chain[i].sd_mbuf != NULL) {
3098 			if (sc->dc_ldata->dc_tx_list[i].dc_ctl &
3099 			    htole32(DC_TXCTL_SETUP)) {
3100 				sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
3101 				continue;
3102 			}
3103 			m_freem(sc->dc_cdata.dc_tx_chain[i].sd_mbuf);
3104 			sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
3105 		}
3106 	}
3107 	bzero((char *)&sc->dc_ldata->dc_tx_list,
3108 		sizeof(sc->dc_ldata->dc_tx_list));
3109 
3110 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
3111 	    0, sc->sc_listmap->dm_mapsize,
3112 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3113 }
3114 
3115 /*
3116  * Stop all chip I/O so that the kernel's probe routines don't
3117  * get confused by errant DMAs when rebooting.
3118  */
3119 void
3120 dc_shutdown(void *v)
3121 {
3122 	struct dc_softc *sc = (struct dc_softc *)v;
3123 
3124 	dc_stop(sc);
3125 }
3126 
3127 void
3128 dc_power(int why, void *arg)
3129 {
3130 	struct dc_softc *sc = arg;
3131 	struct ifnet *ifp;
3132 	int s;
3133 
3134 	s = splnet();
3135 	if (why != PWR_RESUME)
3136 		dc_stop(sc);
3137 	else {
3138 		ifp = &sc->sc_arpcom.ac_if;
3139 		if (ifp->if_flags & IFF_UP)
3140 			dc_init(sc);
3141 	}
3142 	splx(s);
3143 }
3144 
/* Autoconf glue: driver named "dc", classed as a network interface. */
struct cfdriver dc_cd = {
	0, "dc", DV_IFNET
};
3148