xref: /openbsd-src/sys/dev/pci/if_sis.c (revision 2b0358df1d88d06ef4139321dd05bd5e05d91eaf)
1 /*	$OpenBSD: if_sis.c,v 1.87 2009/02/24 21:10:14 claudio Exp $ */
2 /*
3  * Copyright (c) 1997, 1998, 1999
4  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/pci/if_sis.c,v 1.30 2001/02/06 10:11:47 phk Exp $
34  */
35 
36 /*
37  * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
38  * available from http://www.sis.com.tw.
39  *
40  * This driver also supports the NatSemi DP83815. Datasheets are
41  * available from http://www.national.com.
42  *
43  * Written by Bill Paul <wpaul@ee.columbia.edu>
44  * Electrical Engineering Department
45  * Columbia University, New York City
46  */
47 
48 /*
49  * The SiS 900 is a fairly simple chip. It uses bus master DMA with
50  * simple TX and RX descriptors of 3 longwords in size. The receiver
51  * has a single perfect filter entry for the station address and a
52  * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
53  * transceiver while the 7016 requires an external transceiver chip.
54  * Both chips offer the standard bit-bang MII interface as well as
 * an enhanced PHY interface which simplifies accessing MII registers.
56  *
57  * The only downside to this chipset is that RX descriptors must be
58  * longword aligned.
59  */
60 
61 #include "bpfilter.h"
62 
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/mbuf.h>
66 #include <sys/protosw.h>
67 #include <sys/socket.h>
68 #include <sys/ioctl.h>
69 #include <sys/errno.h>
70 #include <sys/malloc.h>
71 #include <sys/kernel.h>
72 #include <sys/timeout.h>
73 
74 #include <net/if.h>
75 #include <net/if_dl.h>
76 #include <net/if_types.h>
77 
78 #ifdef INET
79 #include <netinet/in.h>
80 #include <netinet/in_systm.h>
81 #include <netinet/in_var.h>
82 #include <netinet/ip.h>
83 #include <netinet/if_ether.h>
84 #endif
85 
86 #include <net/if_media.h>
87 
88 #if NBPFILTER > 0
89 #include <net/bpf.h>
90 #endif
91 
92 #include <sys/device.h>
93 
94 #include <dev/mii/mii.h>
95 #include <dev/mii/miivar.h>
96 
97 #include <dev/pci/pcireg.h>
98 #include <dev/pci/pcivar.h>
99 #include <dev/pci/pcidevs.h>
100 
101 #define SIS_USEIOSPACE
102 
103 #include <dev/pci/if_sisreg.h>
104 
105 int sis_probe(struct device *, void *, void *);
106 void sis_attach(struct device *, struct device *, void *);
107 
/* Autoconf glue: softc size plus the match/attach entry points. */
struct cfattach sis_ca = {
	sizeof(struct sis_softc), sis_probe, sis_attach
};
111 
/* Driver definition: a network interface device named "sis". */
struct cfdriver sis_cd = {
	0, "sis", DV_IFNET
};
115 
116 int sis_intr(void *);
117 void sis_shutdown(void *);
118 int sis_newbuf(struct sis_softc *, struct sis_desc *, struct mbuf *);
119 int sis_encap(struct sis_softc *, struct mbuf *, u_int32_t *);
120 void sis_rxeof(struct sis_softc *);
121 void sis_rxeoc(struct sis_softc *);
122 void sis_txeof(struct sis_softc *);
123 void sis_tick(void *);
124 void sis_start(struct ifnet *);
125 int sis_ioctl(struct ifnet *, u_long, caddr_t);
126 void sis_init(void *);
127 void sis_stop(struct sis_softc *);
128 void sis_watchdog(struct ifnet *);
129 int sis_ifmedia_upd(struct ifnet *);
130 void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
131 
132 u_int16_t sis_reverse(u_int16_t);
133 void sis_delay(struct sis_softc *);
134 void sis_eeprom_idle(struct sis_softc *);
135 void sis_eeprom_putbyte(struct sis_softc *, int);
136 void sis_eeprom_getword(struct sis_softc *, int, u_int16_t *);
137 #if defined(__amd64__) || defined(__i386__)
138 void sis_read_cmos(struct sis_softc *, struct pci_attach_args *, caddr_t, int, int);
139 #endif
140 void sis_read_mac(struct sis_softc *, struct pci_attach_args *);
141 void sis_read_eeprom(struct sis_softc *, caddr_t, int, int, int);
142 void sis_read96x_mac(struct sis_softc *);
143 
144 void sis_mii_sync(struct sis_softc *);
145 void sis_mii_send(struct sis_softc *, u_int32_t, int);
146 int sis_mii_readreg(struct sis_softc *, struct sis_mii_frame *);
147 int sis_mii_writereg(struct sis_softc *, struct sis_mii_frame *);
148 int sis_miibus_readreg(struct device *, int, int);
149 void sis_miibus_writereg(struct device *, int, int, int);
150 void sis_miibus_statchg(struct device *);
151 
152 u_int32_t sis_mchash(struct sis_softc *, const uint8_t *);
153 void sis_setmulti(struct sis_softc *);
154 void sis_setmulti_sis(struct sis_softc *);
155 void sis_setmulti_ns(struct sis_softc *);
156 void sis_setpromisc(struct sis_softc *);
157 void sis_reset(struct sis_softc *);
158 int sis_ring_init(struct sis_softc *);
159 
/* Read-modify-write helpers to set/clear bits in a 32-bit CSR. */
#define SIS_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define SIS_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/* Same idea, hard-wired to the EEPROM/MII bit-bang control register. */
#define SIO_SET(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
173 
/* PCI vendor/product IDs of supported chips, matched by sis_probe(). */
const struct pci_matchid sis_devices[] = {
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900 },
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016 },
	{ PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815 }
};
179 
/*
 * Reverse the order of the bits in a 16-bit word: bit 0 becomes
 * bit 15 and so on. Needed because the NatSemi EEPROM stores the
 * station address bit-reversed (see sis_attach()).
 */
u_int16_t
sis_reverse(u_int16_t n)
{
	u_int16_t		r = 0;
	int			b;

	for (b = 0; b < 16; b++) {
		r = (r << 1) | (n & 1);
		n >>= 1;
	}

	return (r);
}
194 
195 void
196 sis_delay(struct sis_softc *sc)
197 {
198 	int			idx;
199 
200 	for (idx = (300 / 33) + 1; idx > 0; idx--)
201 		CSR_READ_4(sc, SIS_CSR);
202 }
203 
/*
 * Force the EEPROM into its idle state: raise chip select, run 25
 * clock pulses to flush any partially clocked-in command, then drop
 * chip select and clear the control register entirely.
 */
void
sis_eeprom_idle(struct sis_softc *sc)
{
	int			i;

	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CLK);
	sis_delay(sc);

	/* 25 clock cycles with chip select asserted. */
	for (i = 0; i < 25; i++) {
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CSEL);
	sis_delay(sc);
	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
}
227 
/*
 * Clock the read opcode and the word address out to the EEPROM,
 * most significant bit first. (Despite the name, this sends an
 * 11-bit command word, and no ACK is checked here.)
 */
void
sis_eeprom_putbyte(struct sis_softc *sc, int addr)
{
	int			d, i;

	d = addr | SIS_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i)
			SIO_SET(SIS_EECTL_DIN);
		else
			SIO_CLR(SIS_EECTL_DIN);
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}
}
253 
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The result is always written to *dest.
 */
void
sis_eeprom_getword(struct sis_softc *sc, int addr, u_int16_t *dest)
{
	int			i;
	u_int16_t		word = 0;

	/* Force EEPROM to idle state. */
	sis_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	sis_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM, most significant bit first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
			word |= i;
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	sis_eeprom_idle(sc);

	*dest = word;
}
296 
297 /*
298  * Read a sequence of words from the EEPROM.
299  */
300 void
301 sis_read_eeprom(struct sis_softc *sc, caddr_t dest,
302     int off, int cnt, int swap)
303 {
304 	int			i;
305 	u_int16_t		word = 0, *ptr;
306 
307 	for (i = 0; i < cnt; i++) {
308 		sis_eeprom_getword(sc, off + i, &word);
309 		ptr = (u_int16_t *)(dest + (i * 2));
310 		if (swap)
311 			*ptr = ntohs(word);
312 		else
313 			*ptr = word;
314 	}
315 }
316 
#if defined(__amd64__) || defined(__i386__)
/*
 * Read 'cnt' bytes starting at CMOS offset 'off' into 'dest'. Used
 * on SiS 630E boards where the station address lives in the APC
 * CMOS RAM. Access is enabled through PCI config register 0x48
 * (bit 0x40), then the bytes are fetched via what appear to be the
 * standard PC RTC index/data ports (0x70/0x71) -- magic numbers
 * taken from SiS's Linux driver.
 */
void
sis_read_cmos(struct sis_softc *sc, struct pci_attach_args *pa,
    caddr_t dest, int off, int cnt)
{
	u_int32_t reg;
	int i;

	/* Enable APC CMOS RAM access. */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x48);
	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg | 0x40);

	for (i = 0; i < cnt; i++) {
		bus_space_write_1(pa->pa_iot, 0x0, 0x70, i + off);
		*(dest + i) = bus_space_read_1(pa->pa_iot, 0x0, 0x71);
	}

	/* Disable CMOS access again (bit 0x40 is cleared unconditionally). */
	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg & ~0x40);
}
#endif
336 
/*
 * Read the station address back from the chip's receive perfect-filter
 * registers (PAR0..PAR2), 16 bits at a time. The RX filter is disabled
 * around the reads and re-enabled afterwards.
 */
void
sis_read_mac(struct sis_softc *sc, struct pci_attach_args *pa)
{
	u_int16_t *enaddr = (u_int16_t *) &sc->arpcom.ac_enaddr;

	/*
	 * Pulse the RELOAD bit first; presumably this makes the chip
	 * refetch its stored configuration -- confirm against datasheet.
	 */
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RELOAD);
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_RELOAD);

	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);

	/* Each filter data read returns 16 bits of the address. */
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
	enaddr[0] = CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff;
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
	enaddr[1] = CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff;
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
	enaddr[2] = CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff;

	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);
}
356 
/*
 * SiS 96x: the EEPROM is shared with other devices, so request access
 * (REQ), poll up to ~2000us for the grant bit, read the station
 * address, then signal completion (DONE). If access is never granted
 * the station address is simply left unchanged.
 */
void
sis_read96x_mac(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS96x_EECTL_REQ);

	for (i = 0; i < 2000; i++) {
		if ((CSR_READ_4(sc, SIS_EECTL) & SIS96x_EECTL_GNT)) {
			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
			    SIS_EE_NODEADDR, 3, 0);
			break;
		} else
			DELAY(1);
	}

	SIO_SET(SIS96x_EECTL_DONE);
}
375 
376 /*
377  * Sync the PHYs by setting data bit and strobing the clock 32 times.
378  */
379 void
380 sis_mii_sync(struct sis_softc *sc)
381 {
382 	int			i;
383 
384  	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
385 
386  	for (i = 0; i < 32; i++) {
387  		SIO_SET(SIS_MII_CLK);
388  		DELAY(1);
389  		SIO_CLR(SIS_MII_CLK);
390  		DELAY(1);
391  	}
392 }
393 
/*
 * Clock a series of bits through the MII, most significant bit first.
 * Note the clock is strobed low-then-high inside the loop, so the
 * clock line is left high after the final bit.
 */
void
sis_mii_send(struct sis_softc *sc, u_int32_t bits, int cnt)
{
	int			i;

	SIO_CLR(SIS_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i)
			SIO_SET(SIS_MII_DATA);
		else
			SIO_CLR(SIS_MII_DATA);
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		SIO_SET(SIS_MII_CLK);
	}
}
415 
/*
 * Read a PHY register through the bit-bang MII interface.
 * Returns 0 on success with the register value in frame->mii_data,
 * or 1 if the PHY failed to acknowledge the request.
 */
int
sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
{
	int			i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	sis_mii_send(sc, frame->mii_stdelim, 2);
	sis_mii_send(sc, frame->mii_opcode, 2);
	sis_mii_send(sc, frame->mii_phyaddr, 5);
	sis_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(SIS_MII_DIR);

	/* Check for ack: the PHY pulls the data line low to acknowledge. */
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(SIS_MII_CLK);
			DELAY(1);
			SIO_SET(SIS_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Clock in the 16 data bits, most significant bit first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(SIS_MII_CLK);
		DELAY(1);
	}

fail:

	/* Final idle clock cycle. */
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return (1);
	return (0);
}
504 
/*
 * Write to a PHY register through the bit-bang MII interface.
 * Always returns 0; writes carry no acknowledge check.
 */
int
sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
{
	int			s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_WRITEOP;
	frame->mii_turnaround = SIS_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	/* Clock out command, addresses, turnaround and the data word. */
	sis_mii_send(sc, frame->mii_stdelim, 2);
	sis_mii_send(sc, frame->mii_opcode, 2);
	sis_mii_send(sc, frame->mii_phyaddr, 5);
	sis_mii_send(sc, frame->mii_regaddr, 5);
	sis_mii_send(sc, frame->mii_turnaround, 2);
	sis_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(SIS_MII_CLK);
	DELAY(1);
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(SIS_MII_DIR);

	splx(s);

	return (0);
}
551 
/*
 * MII bus read entry point. Depending on the chip this uses
 * memory-mapped PHY registers (NatSemi DP83815), the enhanced PHY
 * access register (SiS 900 revs older than 635), or the generic
 * MII bit-bang code.
 */
int
sis_miibus_readreg(struct device *self, int phy, int reg)
{
	struct sis_softc	*sc = (struct sis_softc *)self;
	struct sis_mii_frame    frame;

	if (sc->sis_type == SIS_TYPE_83815) {
		if (phy != 0)
			return (0);
		/*
		 * The NatSemi chip can take a while after
		 * a reset to come ready, during which the BMSR
		 * returns a value of 0. This is *never* supposed
		 * to happen: some of the BMSR bits are meant to
		 * be hardwired in the on position, and this can
		 * confuse the miibus code a bit during the probe
		 * and attach phase. So we make an effort to check
		 * for this condition and wait for it to clear.
		 */
		if (!CSR_READ_4(sc, NS_BMSR))
			DELAY(1000);
		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
	}

	/*
	 * Chipsets < SIS_635 seem not to be able to read/write
	 * through mdio. Use the enhanced PHY access register
	 * again for them.
	 */
	if (sc->sis_type == SIS_TYPE_900 &&
	    sc->sis_rev < SIS_REV_635) {
		int i, val = 0;

		if (phy != 0)
			return (0);

		CSR_WRITE_4(sc, SIS_PHYCTL,
		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);

		/* Poll for the access bit to clear. */
		for (i = 0; i < SIS_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
				break;
		}

		if (i == SIS_TIMEOUT) {
			printf("%s: PHY failed to come ready\n",
			    sc->sc_dev.dv_xname);
			return (0);
		}

		/* Result is returned in the upper 16 bits of the register. */
		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;

		/* All-ones is treated as "no device"; report 0 instead. */
		if (val == 0xFFFF)
			return (0);

		return (val);
	} else {
		bzero((char *)&frame, sizeof(frame));

		frame.mii_phyaddr = phy;
		frame.mii_regaddr = reg;
		sis_mii_readreg(sc, &frame);

		return (frame.mii_data);
	}
}
619 
/*
 * MII bus write entry point; mirrors the access-method selection of
 * sis_miibus_readreg().
 */
void
sis_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct sis_softc	*sc = (struct sis_softc *)self;
	struct sis_mii_frame	frame;

	if (sc->sis_type == SIS_TYPE_83815) {
		if (phy != 0)
			return;
		/* PHY registers are directly memory-mapped on the DP83815. */
		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
		return;
	}

	/*
	 * Chipsets < SIS_635 seem not to be able to read/write
	 * through mdio. Use the enhanced PHY access register
	 * again for them.
	 */
	if (sc->sis_type == SIS_TYPE_900 &&
	    sc->sis_rev < SIS_REV_635) {
		int i;

		if (phy != 0)
			return;

		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
		    (reg << 6) | SIS_PHYOP_WRITE);
		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);

		/* Poll for the access bit to clear. */
		for (i = 0; i < SIS_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
				break;
		}

		if (i == SIS_TIMEOUT)
			printf("%s: PHY failed to come ready\n",
			    sc->sc_dev.dv_xname);
	} else {
		bzero((char *)&frame, sizeof(frame));

		frame.mii_phyaddr = phy;
		frame.mii_regaddr = reg;
		frame.mii_data = data;
		sis_mii_writereg(sc, &frame);
	}
}
666 
/*
 * MII status change callback: reinitialize the chip.
 */
void
sis_miibus_statchg(struct device *self)
{
	sis_init((struct sis_softc *)self);
}
674 
675 u_int32_t
676 sis_mchash(struct sis_softc *sc, const uint8_t *addr)
677 {
678 	uint32_t		crc;
679 
680 	/* Compute CRC for the address value. */
681 	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
682 
683 	/*
684 	 * return the filter bit position
685 	 *
686 	 * The NatSemi chip has a 512-bit filter, which is
687 	 * different than the SiS, so we special-case it.
688 	 */
689 	if (sc->sis_type == SIS_TYPE_83815)
690 		return (crc >> 23);
691 	else if (sc->sis_rev >= SIS_REV_635 ||
692 	    sc->sis_rev == SIS_REV_900B)
693 		return (crc >> 24);
694 	else
695 		return (crc >> 25);
696 }
697 
698 void
699 sis_setmulti(struct sis_softc *sc)
700 {
701 	if (sc->sis_type == SIS_TYPE_83815)
702 		sis_setmulti_ns(sc);
703 	else
704 		sis_setmulti_sis(sc);
705 }
706 
/*
 * Program the NatSemi 512-bit multicast hash filter. Falls back to
 * accepting all multicast when the interface requests ALLMULTI or
 * PROMISC, or when a multicast address range (addrlo != addrhi) is
 * configured, since ranges cannot be hashed.
 */
void
sis_setmulti_ns(struct sis_softc *sc)
{
	struct ifnet		*ifp;
	struct arpcom		*ac = &sc->arpcom;
	struct ether_multi	*enm;
	struct ether_multistep  step;
	u_int32_t		h = 0, i, filtsave;
	int			bit, index;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
allmulti:
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
		return;
	}

	/* Address ranges can't be hashed; use ALLMULTI instead. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * We have to explicitly enable the multicast hash table
	 * on the NatSemi chip if we want to use it, which we do.
	 */
	SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);

	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);

	/* first, zot all the existing hash bits */
	for (i = 0; i < 32; i++) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2));
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
	}

	/* Set the filter bit for each multicast address's hash value. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = sis_mchash(sc, enm->enm_addrlo);
		index = h >> 3;
		bit = h & 0x1F;
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
		if (bit > 0xF)
			bit -= 0x10;
		SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
}
764 
/*
 * Program the SiS multicast hash filter (8 or 16 16-bit hash words
 * depending on chip revision). Falls back to accepting all multicast
 * when the interface asks for it, when a multicast address range is
 * configured, or when more addresses are listed than hash words.
 */
void
sis_setmulti_sis(struct sis_softc *sc)
{
	struct ifnet		*ifp;
	struct arpcom		*ac = &sc->arpcom;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int32_t		h, i, n, ctl;
	u_int16_t		hashes[16];

	ifp = &sc->arpcom.ac_if;

	/* hash table size */
	if (sc->sis_rev >= SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)
		n = 16;
	else
		n = 8;

	/* Preserve only the enable bit of the current filter control. */
	ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE;

	if (ifp->if_flags & IFF_BROADCAST)
		ctl |= SIS_RXFILTCTL_BROAD;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
allmulti:
		ctl |= SIS_RXFILTCTL_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			ctl |= SIS_RXFILTCTL_BROAD|SIS_RXFILTCTL_ALLPHYS;
		/* Open the hash table completely as well. */
		for (i = 0; i < n; i++)
			hashes[i] = ~0;
	} else {
		for (i = 0; i < n; i++)
			hashes[i] = 0;
		i = 0;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Address ranges can't be hashed; accept all. */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
				ifp->if_flags |= IFF_ALLMULTI;
				goto allmulti;
			}

			h = sis_mchash(sc, enm->enm_addrlo);
			hashes[h >> 4] |= 1 << (h & 0xf);
			i++;
			ETHER_NEXT_MULTI(step, enm);
		}
		/* More addresses than hash words: accept all multicast. */
		if (i > n) {
			ctl |= SIS_RXFILTCTL_ALLMULTI;
			for (i = 0; i < n; i++)
				hashes[i] = ~0;
		}
	}

	/* Hash words live at filter table addresses 4 .. 4+n-1. */
	for (i = 0; i < n; i++) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
	}

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl);
}
826 
827 void
828 sis_setpromisc(struct sis_softc *sc)
829 {
830 	struct ifnet	*ifp = &sc->arpcom.ac_if;
831 
832 	/* If we want promiscuous mode, set the allframes bit. */
833 	if (ifp->if_flags & IFF_PROMISC)
834 		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
835 	else
836 		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
837 }
838 
/*
 * Issue a soft reset and poll (up to SIS_TIMEOUT iterations) for the
 * chip to clear the reset bit again.
 */
void
sis_reset(struct sis_softc *sc)
{
	int			i;

	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);

	for (i = 0; i < SIS_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
			break;
	}

	if (i == SIS_TIMEOUT)
		printf("%s: reset never completed\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
		CSR_WRITE_4(sc, NS_CLKRUN, 0);
	}
}
866 
867 /*
868  * Probe for an SiS chip. Check the PCI vendor and device
869  * IDs against our list and return a device name if we find a match.
870  */
871 int
872 sis_probe(struct device *parent, void *match, void *aux)
873 {
874 	return (pci_matchbyid((struct pci_attach_args *)aux, sis_devices,
875 	    sizeof(sis_devices)/sizeof(sis_devices[0])));
876 }
877 
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
sis_attach(struct device *parent, struct device *self, void *aux)
{
	int			i;
	const char		*intrstr = NULL;
	pcireg_t		command;
	struct sis_softc	*sc = (struct sis_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	struct ifnet		*ifp;
	bus_size_t		size;

	sc->sis_stopped = 1;

	/*
	 * Handle power management nonsense.
	 */
	command = pci_conf_read(pc, pa->pa_tag, SIS_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {

		command = pci_conf_read(pc, pa->pa_tag, SIS_PCI_PWRMGMTCTRL);
		if (command & SIS_PSTATE_MASK) {
			u_int32_t		iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pc, pa->pa_tag, SIS_PCI_LOIO);
			membase = pci_conf_read(pc, pa->pa_tag, SIS_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, SIS_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode -- setting to D0\n",
			    sc->sc_dev.dv_xname, command & SIS_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag, SIS_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, SIS_PCI_LOIO, iobase);
			pci_conf_write(pc, pa->pa_tag, SIS_PCI_LOMEM, membase);
			pci_conf_write(pc, pa->pa_tag, SIS_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */

#ifdef SIS_USEIOSPACE
	if (pci_mapreg_map(pa, SIS_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	if (pci_mapreg_map(pa, SIS_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, sis_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}

	/* Determine the chip family from the PCI product ID. */
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_SIS_900:
		sc->sis_type = SIS_TYPE_900;
		break;
	case PCI_PRODUCT_SIS_7016:
		sc->sis_type = SIS_TYPE_7016;
		break;
	case PCI_PRODUCT_NS_DP83815:
		sc->sis_type = SIS_TYPE_83815;
		break;
	default:
		break;
	}
	sc->sis_rev = PCI_REVISION(pa->pa_class);

	/* Reset the adapter. */
	sis_reset(sc);

	if (sc->sis_type == SIS_TYPE_900 &&
	   (sc->sis_rev == SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)) {
		SIO_SET(SIS_CFG_RND_CNT);
		SIO_SET(SIS_CFG_PERR_DETECT);
	}

	/*
	 * Get station address from the EEPROM.
	 */
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_NS:
		/* Identify the NatSemi silicon revision. */
		sc->sis_srr = CSR_READ_4(sc, NS_SRR);

		if (sc->sis_srr == NS_SRR_15C)
			printf(", DP83815C");
		else if (sc->sis_srr == NS_SRR_15D)
			printf(", DP83815D");
		else if (sc->sis_srr == NS_SRR_16A)
			printf(", DP83816A");
		else
			printf(", srr %x", sc->sis_srr);

		/*
		 * Reading the MAC address out of the EEPROM on
		 * the NatSemi chip takes a bit more work than
		 * you'd expect. The address spans 4 16-bit words,
		 * with the first word containing only a single bit.
		 * You have to shift everything over one bit to
		 * get it aligned properly. Also, the bits are
		 * stored backwards (the LSB is really the MSB,
		 * and so on) so you have to reverse them in order
		 * to get the MAC address into the form we want.
		 * Why? Who the hell knows.
		 */
		{
			u_int16_t		tmp[4];

			sis_read_eeprom(sc, (caddr_t)&tmp, NS_EE_NODEADDR,4,0);

			/* Shift everything over one bit. */
			tmp[3] = tmp[3] >> 1;
			tmp[3] |= tmp[2] << 15;
			tmp[2] = tmp[2] >> 1;
			tmp[2] |= tmp[1] << 15;
			tmp[1] = tmp[1] >> 1;
			tmp[1] |= tmp[0] << 15;

			/* Now reverse all the bits. */
			tmp[3] = sis_reverse(tmp[3]);
			tmp[2] = sis_reverse(tmp[2]);
			tmp[1] = sis_reverse(tmp[1]);

			bcopy((char *)&tmp[1], sc->arpcom.ac_enaddr,
			    ETHER_ADDR_LEN);
		}
		break;
	case PCI_VENDOR_SIS:
	default:
#if defined(__amd64__) || defined(__i386__)
		/*
		 * If this is a SiS 630E chipset with an embedded
		 * SiS 900 controller, we have to read the MAC address
		 * from the APC CMOS RAM. Our method for doing this
		 * is very ugly since we have to reach out and grab
		 * ahold of hardware for which we cannot properly
		 * allocate resources. This code is only compiled on
		 * the i386 architecture since the SiS 630E chipset
		 * is for x86 motherboards only. Note that there are
		 * a lot of magic numbers in this hack. These are
		 * taken from SiS's Linux driver. I'd like to replace
		 * them with proper symbolic definitions, but that
		 * requires some datasheets that I don't have access
		 * to at the moment.
		 */
		if (sc->sis_rev == SIS_REV_630S ||
		    sc->sis_rev == SIS_REV_630E)
			sis_read_cmos(sc, pa, (caddr_t)&sc->arpcom.ac_enaddr,
			    0x9, 6);
		else
#endif
		if (sc->sis_rev == SIS_REV_96x)
			sis_read96x_mac(sc);
		else if (sc->sis_rev == SIS_REV_635 ||
		    sc->sis_rev == SIS_REV_630ET ||
		    sc->sis_rev == SIS_REV_630EA1)
			sis_read_mac(sc, pa);
		else
			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
			    SIS_EE_NODEADDR, 3, 0);
		break;
	}

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->arpcom.ac_enaddr));

	sc->sc_dmat = pa->pa_dmat;

	/*
	 * Allocate, map and load DMA-safe memory for the descriptor
	 * rings.
	 * NOTE(review): the fail_2 path below does not release DMA
	 * memory/maps acquired before a later step failed, so these
	 * leak on partial failure -- confirm whether that is accepted
	 * here as attach-time-only allocation.
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sis_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't alloc list mem\n");
		goto fail_2;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct sis_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		goto fail_2;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct sis_list_data), 1,
	    sizeof(struct sis_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		goto fail_2;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct sis_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		goto fail_2;
	}
	sc->sis_ldata = (struct sis_list_data *)sc->sc_listkva;
	bzero(sc->sis_ldata, sizeof(struct sis_list_data));

	/* Create per-descriptor DMA maps for RX, plus one spare. */
	for (i = 0; i < SIS_RX_LIST_CNT_MAX; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_NOWAIT, &sc->sis_ldata->sis_rx_list[i].map) != 0) {
			printf(": can't create rx map\n");
			goto fail_2;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
		printf(": can't create rx spare map\n");
		goto fail_2;
	}

	/* Create per-descriptor DMA maps for TX, plus one spare. */
	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SIS_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sis_ldata->sis_tx_list[i].map) != 0) {
			printf(": can't create tx map\n");
			goto fail_2;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, SIS_TX_LIST_CNT - 3,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		goto fail_2;
	}

	timeout_set(&sc->sis_timeout, sis_tick, sc);

	/* Set up the network interface structure. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sis_ioctl;
	ifp->if_start = sis_start;
	ifp->if_watchdog = sis_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Hook up the MII bus and probe for PHYs. */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sis_miibus_readreg;
	sc->sc_mii.mii_writereg = sis_miibus_writereg;
	sc->sc_mii.mii_statchg = sis_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, sis_ifmedia_upd,sis_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHYs found: expose a "none" media type only. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	shutdownhook_establish(sis_shutdown, sc);
	return;

fail_2:
	pci_intr_disestablish(pc, sc->sc_ih);

fail_1:
	bus_space_unmap(sc->sis_btag, sc->sis_bhandle, size);
}
1171 
1172 /*
1173  * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
1174  * we arrange the descriptors in a closed ring, so that the last descriptor
1175  * points back to the first.
1176  */
1177 int
1178 sis_ring_init(struct sis_softc *sc)
1179 {
1180 	struct sis_list_data	*ld;
1181 	struct sis_ring_data	*cd;
1182 	int			i, error, nexti;
1183 
1184 	cd = &sc->sis_cdata;
1185 	ld = sc->sis_ldata;
1186 
1187 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1188 		if (i == (SIS_TX_LIST_CNT - 1))
1189 			nexti = 0;
1190 		else
1191 			nexti = i + 1;
1192 		ld->sis_tx_list[i].sis_nextdesc = &ld->sis_tx_list[nexti];
1193 		ld->sis_tx_list[i].sis_next = sc->sc_listmap->dm_segs[0].ds_addr +
1194 			offsetof(struct sis_list_data, sis_tx_list[nexti]);
1195 		ld->sis_tx_list[i].sis_mbuf = NULL;
1196 		ld->sis_tx_list[i].sis_ptr = 0;
1197 		ld->sis_tx_list[i].sis_ctl = 0;
1198 	}
1199 
1200 	cd->sis_tx_prod = cd->sis_tx_cons = cd->sis_tx_cnt = 0;
1201 
1202 	if (sc->arpcom.ac_if.if_flags & IFF_UP)
1203 		sc->sc_rxbufs = SIS_RX_LIST_CNT_MAX;
1204 	else
1205 		sc->sc_rxbufs = SIS_RX_LIST_CNT_MIN;
1206 
1207 	for (i = 0; i < sc->sc_rxbufs; i++) {
1208 		error = sis_newbuf(sc, &ld->sis_rx_list[i], NULL);
1209 		if (error)
1210 			return (error);
1211 		if (i == (sc->sc_rxbufs - 1))
1212 			nexti = 0;
1213 		else
1214 			nexti = i + 1;
1215 		ld->sis_rx_list[i].sis_nextdesc = &ld->sis_rx_list[nexti];
1216 		ld->sis_rx_list[i].sis_next = sc->sc_listmap->dm_segs[0].ds_addr +
1217 			offsetof(struct sis_list_data, sis_rx_list[nexti]);
1218 	}
1219 
1220 	cd->sis_rx_pdsc = &ld->sis_rx_list[0];
1221 
1222 	return (0);
1223 }
1224 
1225 /*
1226  * Initialize an RX descriptor and attach an MBUF cluster.
1227  */
int
sis_newbuf(struct sis_softc *sc, struct sis_desc *c, struct mbuf *m)
{
	struct mbuf		*m_new = NULL;
	bus_dmamap_t		map;

	if (c == NULL)
		return (EINVAL);

	/*
	 * If no mbuf was supplied, allocate a fresh header plus cluster.
	 * Otherwise recycle the caller's mbuf, rewinding its data pointer
	 * to the start of the external cluster.
	 */
	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/* Load into the spare map first so a failure leaves c->map intact. */
	if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_sparemap, m_new,
	    BUS_DMA_NOWAIT)) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	/* Swap the freshly loaded spare map with the descriptor's map. */
	map = c->map;
	c->map = sc->sc_rx_sparemap;
	sc->sc_rx_sparemap = map;

	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Point the descriptor at the new buffer and set its length. */
	c->sis_mbuf = m_new;
	c->sis_ptr = c->map->dm_segs[0].ds_addr;
	c->sis_ctl = ETHER_MAX_DIX_LEN;

	/* Flush the descriptor update itself to DMA memory. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1277 
1278 /*
1279  * A frame has been uploaded: pass the resulting mbuf chain up to
1280  * the higher level protocols.
1281  */
void
sis_rxeof(struct sis_softc *sc)
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sis_desc		*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Walk the RX ring from the last processed descriptor until we
	 * reach one the chip still owns.
	 */
	for(cur_rx = sc->sis_cdata.sis_rx_pdsc; SIS_OWNDESC(cur_rx);
	    cur_rx = cur_rx->sis_nextdesc) {

		/* Pull the descriptor contents in from DMA memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    ((caddr_t)cur_rx - sc->sc_listkva),
		    sizeof(struct sis_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = cur_rx->sis_rxstat;
		m = cur_rx->sis_mbuf;
		cur_rx->sis_mbuf = NULL;
		total_len = SIS_RXBYTES(cur_rx);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring. However, don't report long
		 * frames as errors since they could be VLANs.
		 */
		if (rxstat & SIS_RXSTAT_GIANT &&
		    total_len <= (ETHER_MAX_DIX_LEN - ETHER_CRC_LEN))
			rxstat &= ~SIS_RXSTAT_GIANT;
		if (SIS_RXSTAT_ERROR(rxstat)) {
			ifp->if_ierrors++;
			if (rxstat & SIS_RXSTAT_COLL)
				ifp->if_collisions++;
			/* Recycle the same mbuf back into this descriptor. */
			sis_newbuf(sc, cur_rx, m);
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(sc->sc_dmat, cur_rx->map, 0,
		    cur_rx->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
#ifndef __STRICT_ALIGNMENT
		/*
		 * On some architectures, we do not have alignment problems,
		 * so try to allocate a new buffer for the receive ring, and
		 * pass up the one where the packet is already, saving the
		 * expensive copy done in m_devget().
		 * If we are on an architecture with alignment problems, or
		 * if the allocation fails, then use m_devget and leave the
		 * existing buffer in the receive ring.
		 */
		if (sis_newbuf(sc, cur_rx, NULL) == 0) {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		} else
#endif
		{
			struct mbuf *m0;
			/* Copy the frame out, then recycle the old mbuf. */
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp, NULL);
			sis_newbuf(sc, cur_rx, m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Hand a copy of the frame to any attached BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Remember where to resume on the next interrupt. */
	sc->sis_cdata.sis_rx_pdsc = cur_rx;
}
1367 
void
sis_rxeoc(struct sis_softc *sc)
{
	/*
	 * RX overflow recovery: drain whatever frames have already
	 * completed, then reinitialize the chip to restart reception.
	 * The order matters — sis_init() tears down the RX ring.
	 */
	sis_rxeof(sc);
	sis_init(sc);
}
1374 
1375 /*
1376  * A frame was downloaded to the chip. It's safe for us to clean up
1377  * the list buffers.
1378  */
1379 
void
sis_txeof(struct sis_softc *sc)
{
	struct ifnet		*ifp;
	u_int32_t		idx;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (idx = sc->sis_cdata.sis_tx_cons; sc->sis_cdata.sis_tx_cnt > 0;
	    sc->sis_cdata.sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT)) {
		struct sis_desc *cur_tx = &sc->sis_ldata->sis_tx_list[idx];

		/* Pull the descriptor contents in from DMA memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    ((caddr_t)cur_tx - sc->sc_listkva),
		    sizeof(struct sis_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Stop at the first descriptor the chip still owns. */
		if (SIS_OWNDESC(cur_tx))
			break;

		/* Intermediate fragment: no mbuf or stats attached here. */
		if (cur_tx->sis_ctl & SIS_CMDSTS_MORE)
			continue;

		if (!(cur_tx->sis_ctl & SIS_CMDSTS_PKT_OK)) {
			ifp->if_oerrors++;
			if (cur_tx->sis_txstat & SIS_TXSTAT_EXCESSCOLLS)
				ifp->if_collisions++;
			if (cur_tx->sis_txstat & SIS_TXSTAT_OUTOFWINCOLL)
				ifp->if_collisions++;
		}

		/* Add in the per-frame collision count from the chip. */
		ifp->if_collisions +=
		    (cur_tx->sis_txstat & SIS_TXSTAT_COLLCNT) >> 16;

		ifp->if_opackets++;
		if (cur_tx->map->dm_nsegs != 0) {
			bus_dmamap_t map = cur_tx->map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (cur_tx->sis_mbuf != NULL) {
			m_freem(cur_tx->sis_mbuf);
			cur_tx->sis_mbuf = NULL;
		}
	}

	if (idx != sc->sis_cdata.sis_tx_cons) {
		/* we freed up some buffers */
		sc->sis_cdata.sis_tx_cons = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	/* Clear the watchdog once the ring drains; otherwise rearm it. */
	ifp->if_timer = (sc->sis_cdata.sis_tx_cnt == 0) ? 0 : 5;
}
1440 
1441 void
1442 sis_tick(void *xsc)
1443 {
1444 	struct sis_softc	*sc = (struct sis_softc *)xsc;
1445 	struct mii_data		*mii;
1446 	struct ifnet		*ifp;
1447 	int			s;
1448 
1449 	s = splnet();
1450 
1451 	ifp = &sc->arpcom.ac_if;
1452 
1453 	mii = &sc->sc_mii;
1454 	mii_tick(mii);
1455 
1456 	if (!sc->sis_link && mii->mii_media_status & IFM_ACTIVE &&
1457 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1458 		sc->sis_link++;
1459 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
1460 			sis_start(ifp);
1461 	}
1462 	timeout_add_sec(&sc->sis_timeout, 1);
1463 
1464 	splx(s);
1465 }
1466 
int
sis_intr(void *arg)
{
	struct sis_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;
	int			claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	if (sc->sis_stopped)	/* Most likely shared interrupt */
		return (claimed);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, SIS_IER, 0);

	for (;;) {
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, SIS_ISR);

		if ((status & SIS_INTRS) == 0)
			break;

		claimed = 1;

		/* TX completion or error: reap finished descriptors. */
		if (status &
		    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
		     SIS_ISR_TX_OK | SIS_ISR_TX_IDLE))
			sis_txeof(sc);

		/* RX completion or error: harvest received frames. */
		if (status &
		    (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
		     SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
			sis_rxeof(sc);

		/* RX overflow: drain the ring and reinitialize the chip. */
		if (status & SIS_ISR_RX_OFLOW)
			sis_rxeoc(sc);

#if 0
		if (status & (SIS_ISR_RX_IDLE))
			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
#endif

		/* Fatal system error: full reset and restart. */
		if (status & SIS_ISR_SYSERR) {
			sis_reset(sc);
			sis_init(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, SIS_IER, 1);

	/*
	 * XXX: Re-enable RX engine every time otherwise it occasionally
	 * stops under unknown circumstances.
	 */
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

	/* Push out anything that queued up while we were in here. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		sis_start(ifp);

	return (claimed);
}
1531 
1532 /*
1533  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1534  * pointers to the fragment pointers.
1535  */
1536 int
1537 sis_encap(struct sis_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
1538 {
1539 	struct sis_desc		*f = NULL;
1540 	int			frag, cur, i;
1541 	bus_dmamap_t		map;
1542 
1543 	map = sc->sc_tx_sparemap;
1544 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
1545 	    m_head, BUS_DMA_NOWAIT) != 0)
1546 		return (ENOBUFS);
1547 
1548 	/*
1549  	 * Start packing the mbufs in this chain into
1550 	 * the fragment pointers. Stop when we run out
1551  	 * of fragments or hit the end of the mbuf chain.
1552 	 */
1553 	cur = frag = *txidx;
1554 
1555 	for (i = 0; i < map->dm_nsegs; i++) {
1556 		if ((SIS_TX_LIST_CNT - (sc->sis_cdata.sis_tx_cnt + i)) < 2)
1557 			return(ENOBUFS);
1558 		f = &sc->sis_ldata->sis_tx_list[frag];
1559 		f->sis_ctl = SIS_CMDSTS_MORE | map->dm_segs[i].ds_len;
1560 		f->sis_ptr = map->dm_segs[i].ds_addr;
1561 		if (i != 0)
1562 			f->sis_ctl |= SIS_CMDSTS_OWN;
1563 		cur = frag;
1564 		SIS_INC(frag, SIS_TX_LIST_CNT);
1565 	}
1566 
1567 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1568 	    BUS_DMASYNC_PREWRITE);
1569 
1570 	sc->sis_ldata->sis_tx_list[cur].sis_mbuf = m_head;
1571 	sc->sis_ldata->sis_tx_list[cur].sis_ctl &= ~SIS_CMDSTS_MORE;
1572 	sc->sis_ldata->sis_tx_list[*txidx].sis_ctl |= SIS_CMDSTS_OWN;
1573 	sc->sis_cdata.sis_tx_cnt += i;
1574 	*txidx = frag;
1575 
1576 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1577 	    offsetof(struct sis_list_data, sis_tx_list[0]),
1578 	    sizeof(struct sis_desc) * SIS_TX_LIST_CNT,
1579 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1580 
1581 	return (0);
1582 }
1583 
1584 /*
1585  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1586  * to the mbuf data regions directly in the transmit lists. We also save a
1587  * copy of the pointers since the transmit list fragment pointers are
1588  * physical addresses.
1589  */
1590 
void
sis_start(struct ifnet *ifp)
{
	struct sis_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx, queued = 0;

	sc = ifp->if_softc;

	/* Don't bother queueing frames until the PHY reports link. */
	if (!sc->sis_link)
		return;

	idx = sc->sis_cdata.sis_tx_prod;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	/* Fill descriptors until the ring fills or the queue empties. */
	while(sc->sis_ldata->sis_tx_list[idx].sis_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Ring full: stall output until sis_txeof() frees space. */
		if (sis_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);

		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (queued) {
		/* Transmit */
		sc->sis_cdata.sis_tx_prod = idx;
		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}
1644 
/*
 * Bring the chip from a stopped state to fully operational: program
 * the MAC address and receive filters, rebuild the descriptor rings,
 * apply chip-revision errata workarounds, and enable the RX/TX engines.
 */
void
sis_init(void *xsc)
{
	struct sis_softc	*sc = (struct sis_softc *)xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii;
	int			s;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	sis_stop(sc);

#if NS_IHR_DELAY > 0
	/* Configure interrupt holdoff register. */
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr == NS_SRR_16A)
		CSR_WRITE_4(sc, NS_IHR, NS_IHR_VALUE);
#endif

	mii = &sc->sc_mii;

	/* Set MAC address (NatSemi and SiS use different filter layouts). */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
	} else {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
	}

	/* Init circular TX/RX lists. */
	if (sis_ring_init(sc) != 0) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		sis_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Short Cable Receive Errors (MP21.E)
	 * also: Page 78 of the DP83815 data sheet (september 2002 version)
	 * recommends the following register settings "for optimum
	 * performance." for rev 15C.  The driver from NS also sets
	 * the PHY_CR register for later versions.
	 */
	 if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
		if (sc->sis_srr == NS_SRR_15C) {
			/* set val for c2 */
			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
			/* load/kill c2 */
			CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
			/* raise SD off, from 4 to c */
			CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
		}
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP);
		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
	else
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);

	/* Set promiscuous mode. */
	sis_setpromisc(sc);

	/*
	 * Load the multicast filter.
	 */
	sis_setmulti(sc);

	/* Turn the receive filter on */
	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct sis_list_data, sis_rx_list[0]));
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct sis_list_data, sis_tx_list[0]));

	/* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
	 * the PCI bus. When this bit is set, the Max DMA Burst Size
	 * for TX/RX DMA should be no larger than 16 double words.
	 */
	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN)
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
	else
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);

	/* Accept Long Packets for VLAN support */
	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);

	/* Set TX configuration */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T)
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
	else
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);

	/* Set full/half duplex mode. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		SIS_SETBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	} else {
		SIS_CLRBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	}

	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
		/*
		 * MPII03.D: Half Duplex Excessive Collisions.
		 * Also page 49 in 83816 manual
		 */
		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
 	}

	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
	     IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
		uint32_t reg;

		/*
		 * Short Cable Receive Errors (MP21.E)
		 */
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
		DELAY(100000);
		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
#ifdef DEBUG
			printf("%s: Applying short cable fix (reg=%x)\n",
			    sc->sc_dev.dv_xname, reg);
#endif
			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
			reg = CSR_READ_4(sc, NS_PHY_DSPCFG);
			SIS_SETBIT(sc, NS_PHY_DSPCFG, reg | 0x20);
		}
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
	CSR_WRITE_4(sc, SIS_IER, 1);

	/* Enable receiver and transmitter. */
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

#ifdef notdef
	mii_mediachg(mii);
#endif

	/* Mark ourselves running and restart the one-second tick. */
	sc->sis_stopped = 0;
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	timeout_add_sec(&sc->sis_timeout, 1);
}
1842 
1843 /*
1844  * Set media options.
1845  */
1846 int
1847 sis_ifmedia_upd(struct ifnet *ifp)
1848 {
1849 	struct sis_softc	*sc;
1850 	struct mii_data		*mii;
1851 
1852 	sc = ifp->if_softc;
1853 
1854 	mii = &sc->sc_mii;
1855 	sc->sis_link = 0;
1856 	if (mii->mii_instance) {
1857 		struct mii_softc	*miisc;
1858 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1859 			mii_phy_reset(miisc);
1860 	}
1861 	mii_mediachg(mii);
1862 
1863 	return (0);
1864 }
1865 
1866 /*
1867  * Report current media status.
1868  */
1869 void
1870 sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1871 {
1872 	struct sis_softc	*sc;
1873 	struct mii_data		*mii;
1874 
1875 	sc = ifp->if_softc;
1876 
1877 	mii = &sc->sc_mii;
1878 	mii_pollstat(mii);
1879 	ifmr->ifm_active = mii->mii_media_active;
1880 	ifmr->ifm_status = mii->mii_media_status;
1881 }
1882 
int
sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sis_softc	*sc = ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *) data;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		/* Assigning an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			sis_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only PROMISC or ALLMULTI changed since the
			 * last call, just reprogram the filters instead
			 * of doing a full reinitialization.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->sc_if_flags) &
			     IFF_PROMISC) {
				sis_setpromisc(sc);
				sis_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->sc_if_flags) &
			     IFF_ALLMULTI) {
				sis_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					sis_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sis_stop(sc);
		}
		/* Remember the flags so the next call can detect deltas. */
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc->sc_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	/* ENETRESET means the multicast filter needs reloading. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			sis_setmulti(sc);
		error = 0;
	}

	splx(s);
	return(error);
}
1946 
1947 void
1948 sis_watchdog(struct ifnet *ifp)
1949 {
1950 	struct sis_softc	*sc;
1951 	int			s;
1952 
1953 	sc = ifp->if_softc;
1954 
1955 	if (sc->sis_stopped)
1956 		return;
1957 
1958 	ifp->if_oerrors++;
1959 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1960 
1961 	s = splnet();
1962 	sis_stop(sc);
1963 	sis_reset(sc);
1964 	sis_init(sc);
1965 
1966 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1967 		sis_start(ifp);
1968 
1969 	splx(s);
1970 }
1971 
1972 /*
1973  * Stop the adapter and free any mbufs allocated to the
1974  * RX and TX lists.
1975  */
void
sis_stop(struct sis_softc *sc)
{
	int			i;
	struct ifnet		*ifp;

	if (sc->sis_stopped)
		return;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	/* Stop the one-second tick before quiescing the hardware. */
	timeout_del(&sc->sis_timeout);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sis_stopped = 1;

	/* Mask and clear interrupts, then halt the DMA engines. */
	CSR_WRITE_4(sc, SIS_IER, 0);
	CSR_WRITE_4(sc, SIS_IMR, 0);
	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);

	sc->sis_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < SIS_RX_LIST_CNT_MAX; i++) {
		if (sc->sis_ldata->sis_rx_list[i].map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->sis_ldata->sis_rx_list[i].map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->sis_ldata->sis_rx_list[i].sis_mbuf != NULL) {
			m_freem(sc->sis_ldata->sis_rx_list[i].sis_mbuf);
			sc->sis_ldata->sis_rx_list[i].sis_mbuf = NULL;
		}
		/*
		 * Clear the descriptor; the bzero stops short of the
		 * trailing dmamap so the map handle survives for reuse
		 * (presumably the map is the struct's last member —
		 * NOTE(review): verify against struct sis_desc).
		 */
		bzero((char *)&sc->sis_ldata->sis_rx_list[i],
		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
	}

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
		if (sc->sis_ldata->sis_tx_list[i].map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->sis_ldata->sis_tx_list[i].map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->sis_ldata->sis_tx_list[i].sis_mbuf != NULL) {
			m_freem(sc->sis_ldata->sis_tx_list[i].sis_mbuf);
			sc->sis_ldata->sis_tx_list[i].sis_mbuf = NULL;
		}
		/* Same trick as above: keep the dmamap, wipe the rest. */
		bzero((char *)&sc->sis_ldata->sis_tx_list[i],
		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
	}
}
2041 
2042 /*
2043  * Stop all chip I/O so that the kernel's probe routines don't
2044  * get confused by errant DMAs when rebooting.
2045  */
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void
sis_shutdown(void *v)
{
	struct sis_softc	*sc = v;

	/* Quiesce the chip before the system reboots. */
	sis_stop(sc);
}
2053