xref: /openbsd-src/sys/dev/pci/if_sis.c (revision f6aab3d83b51b91c24247ad2c2573574de475a82)
1 /*	$OpenBSD: if_sis.c,v 1.143 2023/11/10 15:51:20 bluhm Exp $ */
2 /*
3  * Copyright (c) 1997, 1998, 1999
4  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/pci/if_sis.c,v 1.30 2001/02/06 10:11:47 phk Exp $
34  */
35 
36 /*
37  * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
38  * available from http://www.sis.com.tw.
39  *
40  * This driver also supports the NatSemi DP83815. Datasheets are
41  * available from http://www.national.com.
42  *
43  * Written by Bill Paul <wpaul@ee.columbia.edu>
44  * Electrical Engineering Department
45  * Columbia University, New York City
46  */
47 
48 /*
49  * The SiS 900 is a fairly simple chip. It uses bus master DMA with
50  * simple TX and RX descriptors of 3 longwords in size. The receiver
51  * has a single perfect filter entry for the station address and a
52  * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
53  * transceiver while the 7016 requires an external transceiver chip.
54  * Both chips offer the standard bit-bang MII interface as well as
55  * an enhanced PHY interface which simplifies accessing MII registers.
56  *
57  * The only downside to this chipset is that RX descriptors must be
58  * longword aligned.
59  */
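
/*
 * For reference, a minimal sketch of the 3-longword descriptor the chip
 * consumes (hypothetical name and layout, shown only for illustration;
 * the authoritative definition is struct sis_desc in if_sisreg.h, which
 * additionally carries driver-private bookkeeping such as the DMA map,
 * the mbuf pointer and the next-descriptor link used throughout this
 * file):
 *
 *	struct sis_hw_desc {
 *		u_int32_t sis_next;	physical address of next descriptor
 *		u_int32_t sis_cmdsts;	OWN bit, status/control and length
 *		u_int32_t sis_ptr;	physical address of the data buffer
 *	};
 */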
60 
61 #include "bpfilter.h"
62 
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/mbuf.h>
66 #include <sys/socket.h>
67 #include <sys/ioctl.h>
68 #include <sys/errno.h>
69 #include <sys/malloc.h>
70 #include <sys/kernel.h>
71 #include <sys/timeout.h>
72 
73 #include <net/if.h>
74 
75 #include <netinet/in.h>
76 #include <netinet/if_ether.h>
77 
78 #include <net/if_media.h>
79 
80 #if NBPFILTER > 0
81 #include <net/bpf.h>
82 #endif
83 
84 #include <sys/device.h>
85 
86 #include <dev/mii/miivar.h>
87 
88 #include <dev/pci/pcireg.h>
89 #include <dev/pci/pcivar.h>
90 #include <dev/pci/pcidevs.h>
91 
92 #define SIS_USEIOSPACE
93 
94 #include <dev/pci/if_sisreg.h>
95 
96 int sis_probe(struct device *, void *, void *);
97 void sis_attach(struct device *, struct device *, void *);
98 int sis_activate(struct device *, int);
99 
100 const struct cfattach sis_ca = {
101 	sizeof(struct sis_softc), sis_probe, sis_attach, NULL,
102 	sis_activate
103 };
104 
105 struct cfdriver sis_cd = {
106 	NULL, "sis", DV_IFNET
107 };
108 
109 int sis_intr(void *);
110 void sis_fill_rx_ring(struct sis_softc *);
111 int sis_newbuf(struct sis_softc *, struct sis_desc *);
112 int sis_encap(struct sis_softc *, struct mbuf *, u_int32_t *);
113 void sis_rxeof(struct sis_softc *);
114 void sis_txeof(struct sis_softc *);
115 void sis_tick(void *);
116 void sis_start(struct ifnet *);
117 int sis_ioctl(struct ifnet *, u_long, caddr_t);
118 void sis_init(void *);
119 void sis_stop(struct sis_softc *);
120 void sis_watchdog(struct ifnet *);
121 int sis_ifmedia_upd(struct ifnet *);
122 void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
123 
124 u_int16_t sis_reverse(u_int16_t);
125 void sis_delay(struct sis_softc *);
126 void sis_eeprom_idle(struct sis_softc *);
127 void sis_eeprom_putbyte(struct sis_softc *, int);
128 void sis_eeprom_getword(struct sis_softc *, int, u_int16_t *);
129 #if defined(__amd64__) || defined(__i386__)
130 void sis_read_cmos(struct sis_softc *, struct pci_attach_args *, caddr_t, int, int);
131 #endif
132 void sis_read_mac(struct sis_softc *, struct pci_attach_args *);
133 void sis_read_eeprom(struct sis_softc *, caddr_t, int, int, int);
134 void sis_read96x_mac(struct sis_softc *);
135 
136 void sis_mii_sync(struct sis_softc *);
137 void sis_mii_send(struct sis_softc *, u_int32_t, int);
138 int sis_mii_readreg(struct sis_softc *, struct sis_mii_frame *);
139 int sis_mii_writereg(struct sis_softc *, struct sis_mii_frame *);
140 int sis_miibus_readreg(struct device *, int, int);
141 void sis_miibus_writereg(struct device *, int, int, int);
142 void sis_miibus_statchg(struct device *);
143 
144 u_int32_t sis_mchash(struct sis_softc *, const uint8_t *);
145 void sis_iff(struct sis_softc *);
146 void sis_iff_ns(struct sis_softc *);
147 void sis_iff_sis(struct sis_softc *);
148 void sis_reset(struct sis_softc *);
149 int sis_ring_init(struct sis_softc *);
150 
151 #define SIS_SETBIT(sc, reg, x)				\
152 	CSR_WRITE_4(sc, reg,				\
153 		CSR_READ_4(sc, reg) | (x))
154 
155 #define SIS_CLRBIT(sc, reg, x)				\
156 	CSR_WRITE_4(sc, reg,				\
157 		CSR_READ_4(sc, reg) & ~(x))
158 
159 #define SIO_SET(x)					\
160 	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)
161 
162 #define SIO_CLR(x)					\
163 	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
164 
165 const struct pci_matchid sis_devices[] = {
166 	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900 },
167 	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016 },
168 	{ PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815 }
169 };
170 
171 /*
172  * Routine to reverse the bits in a word. Stolen almost
173  * verbatim from /usr/games/fortune.
174  */
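/*
 * Each pass swaps progressively larger groups (single bits, 2-bit
 * pairs, nibbles, then bytes), so, for example, sis_reverse(0x0001)
 * yields 0x8000.
 */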
175 u_int16_t
176 sis_reverse(u_int16_t n)
177 {
178 	n = ((n >>  1) & 0x5555) | ((n <<  1) & 0xaaaa);
179 	n = ((n >>  2) & 0x3333) | ((n <<  2) & 0xcccc);
180 	n = ((n >>  4) & 0x0f0f) | ((n <<  4) & 0xf0f0);
181 	n = ((n >>  8) & 0x00ff) | ((n <<  8) & 0xff00);
182 
183 	return (n);
184 }
185 
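/*
 * Crude busy-wait: each dummy CSR read below costs at least one PCI
 * clock (about 30 ns at 33 MHz), so the (300 / 33) + 1 loop bound
 * presumably aims for a delay on the order of 300 ns.
 */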
186 void
187 sis_delay(struct sis_softc *sc)
188 {
189 	int			idx;
190 
191 	for (idx = (300 / 33) + 1; idx > 0; idx--)
192 		CSR_READ_4(sc, SIS_CSR);
193 }
194 
195 void
196 sis_eeprom_idle(struct sis_softc *sc)
197 {
198 	int			i;
199 
200 	SIO_SET(SIS_EECTL_CSEL);
201 	sis_delay(sc);
202 	SIO_SET(SIS_EECTL_CLK);
203 	sis_delay(sc);
204 
205 	for (i = 0; i < 25; i++) {
206 		SIO_CLR(SIS_EECTL_CLK);
207 		sis_delay(sc);
208 		SIO_SET(SIS_EECTL_CLK);
209 		sis_delay(sc);
210 	}
211 
212 	SIO_CLR(SIS_EECTL_CLK);
213 	sis_delay(sc);
214 	SIO_CLR(SIS_EECTL_CSEL);
215 	sis_delay(sc);
216 	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
217 }
218 
219 /*
220  * Shift the read command and word address out to the EEPROM.
221  */
222 void
223 sis_eeprom_putbyte(struct sis_softc *sc, int addr)
224 {
225 	int			d, i;
226 
227 	d = addr | SIS_EECMD_READ;
228 
229 	/*
230 	 * Feed in each command/address bit, MSB first, and strobe the clock.
231 	 */
232 	for (i = 0x400; i; i >>= 1) {
233 		if (d & i)
234 			SIO_SET(SIS_EECTL_DIN);
235 		else
236 			SIO_CLR(SIS_EECTL_DIN);
237 		sis_delay(sc);
238 		SIO_SET(SIS_EECTL_CLK);
239 		sis_delay(sc);
240 		SIO_CLR(SIS_EECTL_CLK);
241 		sis_delay(sc);
242 	}
243 }
244 
245 /*
246  * Read a word of data stored in the EEPROM at address 'addr.'
247  */
248 void
249 sis_eeprom_getword(struct sis_softc *sc, int addr, u_int16_t *dest)
250 {
251 	int			i;
252 	u_int16_t		word = 0;
253 
254 	/* Force EEPROM to idle state. */
255 	sis_eeprom_idle(sc);
256 
257 	/* Enter EEPROM access mode. */
258 	sis_delay(sc);
259 	SIO_CLR(SIS_EECTL_CLK);
260 	sis_delay(sc);
261 	SIO_SET(SIS_EECTL_CSEL);
262 	sis_delay(sc);
263 
264 	/*
265 	 * Send address of word we want to read.
266 	 */
267 	sis_eeprom_putbyte(sc, addr);
268 
269 	/*
270 	 * Start reading bits from EEPROM.
271 	 */
272 	for (i = 0x8000; i; i >>= 1) {
273 		SIO_SET(SIS_EECTL_CLK);
274 		sis_delay(sc);
275 		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
276 			word |= i;
277 		sis_delay(sc);
278 		SIO_CLR(SIS_EECTL_CLK);
279 		sis_delay(sc);
280 	}
281 
282 	/* Turn off EEPROM access mode. */
283 	sis_eeprom_idle(sc);
284 
285 	*dest = word;
286 }
287 
288 /*
289  * Read a sequence of words from the EEPROM.
290  */
291 void
292 sis_read_eeprom(struct sis_softc *sc, caddr_t dest,
293     int off, int cnt, int swap)
294 {
295 	int			i;
296 	u_int16_t		word = 0, *ptr;
297 
298 	for (i = 0; i < cnt; i++) {
299 		sis_eeprom_getword(sc, off + i, &word);
300 		ptr = (u_int16_t *)(dest + (i * 2));
301 		if (swap)
302 			*ptr = letoh16(word);
303 		else
304 			*ptr = word;
305 	}
306 }
307 
308 #if defined(__amd64__) || defined(__i386__)
309 void
310 sis_read_cmos(struct sis_softc *sc, struct pci_attach_args *pa,
311     caddr_t dest, int off, int cnt)
312 {
313 	u_int32_t reg;
314 	int i;
315 
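	/*
	 * Ports 0x70/0x71 are the standard RTC/CMOS index and data ports;
	 * the 0x40 bit in PCI config register 0x48 presumably gates APC
	 * CMOS access (magic values taken from SiS's Linux driver, see
	 * the comment in sis_attach()).
	 */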
316 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x48);
317 	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg | 0x40);
318 
319 	for (i = 0; i < cnt; i++) {
320 		bus_space_write_1(pa->pa_iot, 0x0, 0x70, i + off);
321 		*(dest + i) = bus_space_read_1(pa->pa_iot, 0x0, 0x71);
322 	}
323 
324 	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg & ~0x40);
325 }
326 #endif
327 
328 void
329 sis_read_mac(struct sis_softc *sc, struct pci_attach_args *pa)
330 {
331 	uint32_t rxfilt, csrsave;
332 	u_int16_t *enaddr = (u_int16_t *) &sc->arpcom.ac_enaddr;
333 
334 	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
335 	csrsave = CSR_READ_4(sc, SIS_CSR);
336 
337 	CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | csrsave);
338 	CSR_WRITE_4(sc, SIS_CSR, 0);
339 
340 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt & ~SIS_RXFILTCTL_ENABLE);
341 
342 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
343 	enaddr[0] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
344 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
345 	enaddr[1] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
346 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
347 	enaddr[2] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
348 
349 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
350 	CSR_WRITE_4(sc, SIS_CSR, csrsave);
351 }
352 
353 void
354 sis_read96x_mac(struct sis_softc *sc)
355 {
356 	int i;
357 
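	/*
	 * Request EEPROM ownership and poll (for up to roughly 2 ms) for
	 * the grant bit before reading the station address, then signal
	 * that we are done with the EEPROM.
	 */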
358 	SIO_SET(SIS96x_EECTL_REQ);
359 
360 	for (i = 0; i < 2000; i++) {
361 		if ((CSR_READ_4(sc, SIS_EECTL) & SIS96x_EECTL_GNT)) {
362 			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
363 			    SIS_EE_NODEADDR, 3, 1);
364 			break;
365 		} else
366 			DELAY(1);
367 	}
368 
369 	SIO_SET(SIS96x_EECTL_DONE);
370 }
371 
372 /*
373  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
374  */
375 void
376 sis_mii_sync(struct sis_softc *sc)
377 {
378 	int			i;
379 
380  	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
381 
382  	for (i = 0; i < 32; i++) {
383  		SIO_SET(SIS_MII_CLK);
384  		DELAY(1);
385  		SIO_CLR(SIS_MII_CLK);
386  		DELAY(1);
387  	}
388 }
389 
390 /*
391  * Clock a series of bits through the MII.
392  */
393 void
394 sis_mii_send(struct sis_softc *sc, u_int32_t bits, int cnt)
395 {
396 	int			i;
397 
398 	SIO_CLR(SIS_MII_CLK);
399 
400 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
401 		if (bits & i)
402 			SIO_SET(SIS_MII_DATA);
403 		else
404 			SIO_CLR(SIS_MII_DATA);
405 		DELAY(1);
406 		SIO_CLR(SIS_MII_CLK);
407 		DELAY(1);
408 		SIO_SET(SIS_MII_CLK);
409 	}
410 }
411 
412 /*
413  * Read a PHY register through the MII.
414  */
415 int
416 sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
417 {
418 	int			i, ack, s;
419 
420 	s = splnet();
421 
422 	/*
423 	 * Set up frame for RX.
424 	 */
425 	frame->mii_stdelim = SIS_MII_STARTDELIM;
426 	frame->mii_opcode = SIS_MII_READOP;
427 	frame->mii_turnaround = 0;
428 	frame->mii_data = 0;
429 
430 	/*
431  	 * Turn on data xmit.
432 	 */
433 	SIO_SET(SIS_MII_DIR);
434 
435 	sis_mii_sync(sc);
436 
437 	/*
438 	 * Send command/address info.
439 	 */
440 	sis_mii_send(sc, frame->mii_stdelim, 2);
441 	sis_mii_send(sc, frame->mii_opcode, 2);
442 	sis_mii_send(sc, frame->mii_phyaddr, 5);
443 	sis_mii_send(sc, frame->mii_regaddr, 5);
444 
445 	/* Idle bit */
446 	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
447 	DELAY(1);
448 	SIO_SET(SIS_MII_CLK);
449 	DELAY(1);
450 
451 	/* Turn off xmit. */
452 	SIO_CLR(SIS_MII_DIR);
453 
454 	/* Check for ack */
455 	SIO_CLR(SIS_MII_CLK);
456 	DELAY(1);
457 	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
458 	SIO_SET(SIS_MII_CLK);
459 	DELAY(1);
460 
461 	/*
462 	 * Now try reading data bits. If the ack failed, we still
463 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
464 	 */
465 	if (ack) {
466 		for(i = 0; i < 16; i++) {
467 			SIO_CLR(SIS_MII_CLK);
468 			DELAY(1);
469 			SIO_SET(SIS_MII_CLK);
470 			DELAY(1);
471 		}
472 		goto fail;
473 	}
474 
475 	for (i = 0x8000; i; i >>= 1) {
476 		SIO_CLR(SIS_MII_CLK);
477 		DELAY(1);
478 		if (!ack) {
479 			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
480 				frame->mii_data |= i;
481 			DELAY(1);
482 		}
483 		SIO_SET(SIS_MII_CLK);
484 		DELAY(1);
485 	}
486 
487 fail:
488 
489 	SIO_CLR(SIS_MII_CLK);
490 	DELAY(1);
491 	SIO_SET(SIS_MII_CLK);
492 	DELAY(1);
493 
494 	splx(s);
495 
496 	if (ack)
497 		return (1);
498 	return (0);
499 }
500 
501 /*
502  * Write to a PHY register through the MII.
503  */
504 int
505 sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
506 {
507 	int			s;
508 
509 	s = splnet();
510  	/*
511  	 * Set up frame for TX.
512  	 */
513 
514  	frame->mii_stdelim = SIS_MII_STARTDELIM;
515  	frame->mii_opcode = SIS_MII_WRITEOP;
516  	frame->mii_turnaround = SIS_MII_TURNAROUND;
517 
518  	/*
519   	 * Turn on data output.
520  	 */
521  	SIO_SET(SIS_MII_DIR);
522 
523  	sis_mii_sync(sc);
524 
525  	sis_mii_send(sc, frame->mii_stdelim, 2);
526  	sis_mii_send(sc, frame->mii_opcode, 2);
527  	sis_mii_send(sc, frame->mii_phyaddr, 5);
528  	sis_mii_send(sc, frame->mii_regaddr, 5);
529  	sis_mii_send(sc, frame->mii_turnaround, 2);
530  	sis_mii_send(sc, frame->mii_data, 16);
531 
532  	/* Idle bit. */
533  	SIO_SET(SIS_MII_CLK);
534  	DELAY(1);
535  	SIO_CLR(SIS_MII_CLK);
536  	DELAY(1);
537 
538  	/*
539  	 * Turn off xmit.
540  	 */
541  	SIO_CLR(SIS_MII_DIR);
542 
543  	splx(s);
544 
545  	return (0);
546 }
547 
548 int
549 sis_miibus_readreg(struct device *self, int phy, int reg)
550 {
551 	struct sis_softc	*sc = (struct sis_softc *)self;
552 	struct sis_mii_frame    frame;
553 
554 	if (sc->sis_type == SIS_TYPE_83815) {
555 		if (phy != 0)
556 			return (0);
557 		/*
558 		 * The NatSemi chip can take a while after
559 		 * a reset to come ready, during which the BMSR
560 		 * returns a value of 0. This is *never* supposed
561 		 * to happen: some of the BMSR bits are meant to
562 		 * be hardwired in the on position, and this can
563 		 * confuse the miibus code a bit during the probe
564 		 * and attach phase. So we make an effort to check
565 		 * for this condition and wait for it to clear.
566 		 */
567 		if (!CSR_READ_4(sc, NS_BMSR))
568 			DELAY(1000);
569 		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
570 	}
571 
572 	/*
573 	 * Chipsets < SIS_635 do not seem to be able to read or write
574 	 * registers through MDIO, so use the enhanced PHY access
575 	 * register for them instead.
576 	 */
577 	if (sc->sis_type == SIS_TYPE_900 &&
578 	    sc->sis_rev < SIS_REV_635) {
579 		int i, val = 0;
580 
581 		if (phy != 0)
582 			return (0);
583 
584 		CSR_WRITE_4(sc, SIS_PHYCTL,
585 		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
586 		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
587 
588 		for (i = 0; i < SIS_TIMEOUT; i++) {
589 			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
590 				break;
591 		}
592 
593 		if (i == SIS_TIMEOUT) {
594 			printf("%s: PHY failed to come ready\n",
595 			    sc->sc_dev.dv_xname);
596 			return (0);
597 		}
598 
599 		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;
600 
601 		if (val == 0xFFFF)
602 			return (0);
603 
604 		return (val);
605 	} else {
606 		bzero(&frame, sizeof(frame));
607 
608 		frame.mii_phyaddr = phy;
609 		frame.mii_regaddr = reg;
610 		sis_mii_readreg(sc, &frame);
611 
612 		return (frame.mii_data);
613 	}
614 }
615 
616 void
617 sis_miibus_writereg(struct device *self, int phy, int reg, int data)
618 {
619 	struct sis_softc	*sc = (struct sis_softc *)self;
620 	struct sis_mii_frame	frame;
621 
622 	if (sc->sis_type == SIS_TYPE_83815) {
623 		if (phy != 0)
624 			return;
625 		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
626 		return;
627 	}
628 
629 	/*
630 	 * Chipsets < SIS_635 do not seem to be able to read or write
631 	 * registers through MDIO, so use the enhanced PHY access
632 	 * register for them instead.
633 	 */
634 	if (sc->sis_type == SIS_TYPE_900 &&
635 	    sc->sis_rev < SIS_REV_635) {
636 		int i;
637 
638 		if (phy != 0)
639 			return;
640 
641 		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
642 		    (reg << 6) | SIS_PHYOP_WRITE);
643 		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
644 
645 		for (i = 0; i < SIS_TIMEOUT; i++) {
646 			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
647 				break;
648 		}
649 
650 		if (i == SIS_TIMEOUT)
651 			printf("%s: PHY failed to come ready\n",
652 			    sc->sc_dev.dv_xname);
653 	} else {
654 		bzero(&frame, sizeof(frame));
655 
656 		frame.mii_phyaddr = phy;
657 		frame.mii_regaddr = reg;
658 		frame.mii_data = data;
659 		sis_mii_writereg(sc, &frame);
660 	}
661 }
662 
663 void
664 sis_miibus_statchg(struct device *self)
665 {
666 	struct sis_softc	*sc = (struct sis_softc *)self;
667 	struct ifnet		*ifp = &sc->arpcom.ac_if;
668 	struct mii_data		*mii = &sc->sc_mii;
669 
670 	if ((ifp->if_flags & IFF_RUNNING) == 0)
671 		return;
672 
673 	sc->sis_link = 0;
674 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
675 	    (IFM_ACTIVE | IFM_AVALID)) {
676 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
677 		case IFM_10_T:
678 			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
679 			sc->sis_link++;
680 			break;
681 		case IFM_100_TX:
682 			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
683 			sc->sis_link++;
684 			break;
685 		default:
686 			break;
687 		}
688 	}
689 
690 	if (!sc->sis_link) {
691 		/*
692 		 * Stopping the MACs seems to reset SIS_TX_LISTPTR and
693 		 * SIS_RX_LISTPTR, which in turn would require
694 		 * reinitializing the TX/RX rings.  So just don't do
695 		 * anything when the link is lost.
696 		 */
697 		return;
698 	}
699 
700 	/* Set full/half duplex mode. */
701 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
702 		SIS_SETBIT(sc, SIS_TX_CFG,
703 		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
704 		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
705 	} else {
706 		SIS_CLRBIT(sc, SIS_TX_CFG,
707 		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
708 		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
709 	}
710 
711 	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
712 		/*
713 		 * MPII03.D: Half Duplex Excessive Collisions.
714 		 * Also page 49 in 83816 manual
715 		 */
716 		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
717 	}
718 
719 	/*
720 	 * Some DP83815s experience problems when used with short
721 	 * (< 30m/100ft) Ethernet cables in 100baseTX mode.  This
722 	 * sequence adjusts the DSP's signal attenuation to fix the
723 	 * problem.
724 	 */
725 	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
726 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
727 		uint32_t reg;
728 
729 		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
730 		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
731 		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
732 		DELAY(100);
733 		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
734 		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
735 #ifdef DEBUG
736 			printf("%s: Applying short cable fix (reg=%x)\n",
737 			    sc->sc_dev.dv_xname, reg);
738 #endif
739 			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
740 			SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20);
741 		}
742 		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
743 	}
744 	/* Enable TX/RX MACs. */
745 	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
746 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE | SIS_CSR_RX_ENABLE);
747 }
748 
749 u_int32_t
750 sis_mchash(struct sis_softc *sc, const uint8_t *addr)
751 {
752 	uint32_t		crc;
753 
754 	/* Compute CRC for the address value. */
755 	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
756 
757 	/*
758 	 * Return the filter bit position.  The NatSemi chip has a
759 	 * 512-bit filter, larger than on the SiS parts, so we
760 	 * special-case it: the top 9 bits of the CRC index its table,
761 	 * versus the top 8 or 7 bits on the SiS chips.
762 	 */
763 	if (sc->sis_type == SIS_TYPE_83815)
764 		return (crc >> 23);
765 	else if (sc->sis_rev >= SIS_REV_635 ||
766 	    sc->sis_rev == SIS_REV_900B)
767 		return (crc >> 24);
768 	else
769 		return (crc >> 25);
770 }
771 
772 void
773 sis_iff(struct sis_softc *sc)
774 {
775 	if (sc->sis_type == SIS_TYPE_83815)
776 		sis_iff_ns(sc);
777 	else
778 		sis_iff_sis(sc);
779 }
780 
781 void
782 sis_iff_ns(struct sis_softc *sc)
783 {
784 	struct ifnet		*ifp = &sc->arpcom.ac_if;
785 	struct arpcom		*ac = &sc->arpcom;
786 	struct ether_multi	*enm;
787 	struct ether_multistep  step;
788 	u_int32_t		h = 0, i, rxfilt;
789 	int			bit, index;
790 
791 	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
792 	if (rxfilt & SIS_RXFILTCTL_ENABLE) {
793 		/*
794 		 * Filter should be disabled to program other bits.
795 		 */
796 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt & ~SIS_RXFILTCTL_ENABLE);
797 		CSR_READ_4(sc, SIS_RXFILT_CTL);
798 	}
799 	rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
800 	    NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD | NS_RXFILTCTL_MCHASH |
801 	    NS_RXFILTCTL_PERFECT);
802 	ifp->if_flags &= ~IFF_ALLMULTI;
803 
804 	/*
805 	 * Always accept ARP frames.
806 	 * Always accept broadcast frames.
807 	 * Always accept frames destined to our station address.
808 	 */
809 	rxfilt |= NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD |
810 	    NS_RXFILTCTL_PERFECT;
811 
812 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
813 		ifp->if_flags |= IFF_ALLMULTI;
814 		rxfilt |= SIS_RXFILTCTL_ALLMULTI;
815 		if (ifp->if_flags & IFF_PROMISC)
816 			rxfilt |= SIS_RXFILTCTL_ALLPHYS;
817 	} else {
818 		/*
819 		 * We have to explicitly enable the multicast hash table
820 		 * on the NatSemi chip if we want to use it, which we do.
821 		 */
822 		rxfilt |= NS_RXFILTCTL_MCHASH;
823 
824 		/* first, zot all the existing hash bits */
825 		for (i = 0; i < 32; i++) {
826 			CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i * 2));
827 			CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
828 		}
829 
830 		ETHER_FIRST_MULTI(step, ac, enm);
831 		while (enm != NULL) {
832 			h = sis_mchash(sc, enm->enm_addrlo);
833 
834 			index = h >> 3;
835 			bit = h & 0x1F;
836 
837 			CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
838 
839 			if (bit > 0xF)
840 				bit -= 0x10;
841 
842 			SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
843 
844 			ETHER_NEXT_MULTI(step, enm);
845 		}
846 	}
847 
848 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
849 	/* Turn the receive filter on. */
850 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt | SIS_RXFILTCTL_ENABLE);
851 	CSR_READ_4(sc, SIS_RXFILT_CTL);
852 }
853 
854 void
855 sis_iff_sis(struct sis_softc *sc)
856 {
857 	struct ifnet		*ifp = &sc->arpcom.ac_if;
858 	struct arpcom		*ac = &sc->arpcom;
859 	struct ether_multi	*enm;
860 	struct ether_multistep	step;
861 	u_int32_t		h, i, maxmulti, rxfilt;
862 	u_int16_t		hashes[16];
863 
864 	/* hash table size */
865 	if (sc->sis_rev >= SIS_REV_635 ||
866 	    sc->sis_rev == SIS_REV_900B)
867 		maxmulti = 16;
868 	else
869 		maxmulti = 8;
870 
871 	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
872 	if (rxfilt & SIS_RXFILTCTL_ENABLE) {
873 		/*
874 		 * Filter should be disabled to program other bits.
875 		 */
876 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt & ~SIS_RXFILTCTL_ENABLE);
877 		CSR_READ_4(sc, SIS_RXFILT_CTL);
878 	}
879 	rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
880 	    SIS_RXFILTCTL_BROAD);
881 	ifp->if_flags &= ~IFF_ALLMULTI;
882 
883 	/*
884 	 * Always accept broadcast frames.
885 	 */
886 	rxfilt |= SIS_RXFILTCTL_BROAD;
887 
888 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
889 	    ac->ac_multicnt > maxmulti) {
890 		ifp->if_flags |= IFF_ALLMULTI;
891 		rxfilt |= SIS_RXFILTCTL_ALLMULTI;
892 		if (ifp->if_flags & IFF_PROMISC)
893 			rxfilt |= SIS_RXFILTCTL_ALLPHYS;
894 
895 		for (i = 0; i < maxmulti; i++)
896 			hashes[i] = ~0;
897 	} else {
898 		for (i = 0; i < maxmulti; i++)
899 			hashes[i] = 0;
900 
901 		ETHER_FIRST_MULTI(step, ac, enm);
902 		while (enm != NULL) {
903 			h = sis_mchash(sc, enm->enm_addrlo);
904 
905 			hashes[h >> 4] |= 1 << (h & 0xf);
906 
907 			ETHER_NEXT_MULTI(step, enm);
908 		}
909 	}
910 
911 	for (i = 0; i < maxmulti; i++) {
912 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
913 		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
914 	}
915 
916 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
917 	/* Turn the receive filter on. */
918 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt | SIS_RXFILTCTL_ENABLE);
919 	CSR_READ_4(sc, SIS_RXFILT_CTL);
920 }
921 
922 void
923 sis_reset(struct sis_softc *sc)
924 {
925 	int			i;
926 
927 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);
928 
929 	for (i = 0; i < SIS_TIMEOUT; i++) {
930 		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
931 			break;
932 	}
933 
934 	if (i == SIS_TIMEOUT)
935 		printf("%s: reset never completed\n", sc->sc_dev.dv_xname);
936 
937 	/* Wait a little while for the chip to get its brains in order. */
938 	DELAY(1000);
939 
940 	/*
941 	 * If this is a NatSemi chip, make sure to clear
942 	 * PME mode.
943 	 */
944 	if (sc->sis_type == SIS_TYPE_83815) {
945 		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
946 		CSR_WRITE_4(sc, NS_CLKRUN, 0);
947 	}
948 }
949 
950 /*
951  * Probe for an SiS chip. Check the PCI vendor and device
952  * IDs against our list and return a match if one is found.
953  */
954 int
955 sis_probe(struct device *parent, void *match, void *aux)
956 {
957 	return (pci_matchbyid((struct pci_attach_args *)aux, sis_devices,
958 	    nitems(sis_devices)));
959 }
960 
961 /*
962  * Attach the interface. Allocate softc structures, do ifmedia
963  * setup and ethernet/BPF attach.
964  */
965 void
966 sis_attach(struct device *parent, struct device *self, void *aux)
967 {
968 	int			i;
969 	const char		*intrstr = NULL;
970 	struct sis_softc	*sc = (struct sis_softc *)self;
971 	struct pci_attach_args	*pa = aux;
972 	pci_chipset_tag_t	pc = pa->pa_pc;
973 	pci_intr_handle_t	ih;
974 	struct ifnet		*ifp;
975 	bus_size_t		size;
976 
977 	sc->sis_stopped = 1;
978 
979 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
980 
981 	/*
982 	 * Map control/status registers.
983 	 */
984 
985 #ifdef SIS_USEIOSPACE
986 	if (pci_mapreg_map(pa, SIS_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
987 	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
988 		printf(": can't map i/o space\n");
989 		return;
990  	}
991 #else
992 	if (pci_mapreg_map(pa, SIS_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
993 	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
994  		printf(": can't map mem space\n");
995 		return;
996  	}
997 #endif
998 
999 	/* Allocate interrupt */
1000 	if (pci_intr_map(pa, &ih)) {
1001 		printf(": couldn't map interrupt\n");
1002 		goto fail_1;
1003 	}
1004 	intrstr = pci_intr_string(pc, ih);
1005 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, sis_intr, sc,
1006 	    self->dv_xname);
1007 	if (sc->sc_ih == NULL) {
1008 		printf(": couldn't establish interrupt");
1009 		if (intrstr != NULL)
1010 			printf(" at %s", intrstr);
1011 		printf("\n");
1012 		goto fail_1;
1013 	}
1014 
1015 	switch (PCI_PRODUCT(pa->pa_id)) {
1016 	case PCI_PRODUCT_SIS_900:
1017 		sc->sis_type = SIS_TYPE_900;
1018 		break;
1019 	case PCI_PRODUCT_SIS_7016:
1020 		sc->sis_type = SIS_TYPE_7016;
1021 		break;
1022 	case PCI_PRODUCT_NS_DP83815:
1023 		sc->sis_type = SIS_TYPE_83815;
1024 		break;
1025 	default:
1026 		break;
1027 	}
1028 	sc->sis_rev = PCI_REVISION(pa->pa_class);
1029 
1030 	/* Reset the adapter. */
1031 	sis_reset(sc);
1032 
1033 	if (sc->sis_type == SIS_TYPE_900 &&
1034 	   (sc->sis_rev == SIS_REV_635 ||
1035 	    sc->sis_rev == SIS_REV_900B)) {
1036 		SIO_SET(SIS_CFG_RND_CNT);
1037 		SIO_SET(SIS_CFG_PERR_DETECT);
1038 	}
1039 
1040 	/*
1041 	 * Get the station address; where it is stored depends on the chip.
1042 	 */
1043 	switch (PCI_VENDOR(pa->pa_id)) {
1044 	case PCI_VENDOR_NS:
1045 		sc->sis_srr = CSR_READ_4(sc, NS_SRR);
1046 
1047 		if (sc->sis_srr == NS_SRR_15C)
1048 			printf(", DP83815C");
1049 		else if (sc->sis_srr == NS_SRR_15D)
1050 			printf(", DP83815D");
1051 		else if (sc->sis_srr == NS_SRR_16A)
1052 			printf(", DP83816A");
1053 		else
1054 			printf(", srr %x", sc->sis_srr);
1055 
1056 		/*
1057 		 * Reading the MAC address out of the EEPROM on
1058 		 * the NatSemi chip takes a bit more work than
1059 		 * you'd expect. The address spans 4 16-bit words,
1060 		 * with the first word containing only a single bit.
1061 		 * You have to shift everything over one bit to
1062 		 * get it aligned properly. Also, the bits are
1063 		 * stored backwards (the LSB is really the MSB,
1064 		 * and so on) so you have to reverse them in order
1065 		 * to get the MAC address into the form we want.
1066 		 * Why? Who the hell knows.
1067 		 */
1068 		{
1069 			u_int16_t		tmp[4];
1070 
1071 			sis_read_eeprom(sc, (caddr_t)&tmp, NS_EE_NODEADDR,
1072 			    4, 0);
1073 
1074 			/* Shift everything over one bit. */
1075 			tmp[3] = tmp[3] >> 1;
1076 			tmp[3] |= tmp[2] << 15;
1077 			tmp[2] = tmp[2] >> 1;
1078 			tmp[2] |= tmp[1] << 15;
1079 			tmp[1] = tmp[1] >> 1;
1080 			tmp[1] |= tmp[0] << 15;
1081 
1082 			/* Now reverse all the bits. */
1083 			tmp[3] = letoh16(sis_reverse(tmp[3]));
1084 			tmp[2] = letoh16(sis_reverse(tmp[2]));
1085 			tmp[1] = letoh16(sis_reverse(tmp[1]));
1086 
1087 			bcopy(&tmp[1], sc->arpcom.ac_enaddr,
1088 			    ETHER_ADDR_LEN);
1089 		}
1090 		break;
1091 	case PCI_VENDOR_SIS:
1092 	default:
1093 #if defined(__amd64__) || defined(__i386__)
1094 		/*
1095 		 * If this is a SiS 630E chipset with an embedded
1096 		 * SiS 900 controller, we have to read the MAC address
1097 		 * from the APC CMOS RAM. Our method for doing this
1098 		 * is very ugly since we have to reach out and grab
1099 		 * ahold of hardware for which we cannot properly
1100 		 * allocate resources. This code is only compiled on
1101 		 * the i386 and amd64 architectures, since the SiS 630E chipset
1102 		 * is for x86 motherboards only. Note that there are
1103 		 * a lot of magic numbers in this hack. These are
1104 		 * taken from SiS's Linux driver. I'd like to replace
1105 		 * them with proper symbolic definitions, but that
1106 		 * requires some datasheets that I don't have access
1107 		 * to at the moment.
1108 		 */
1109 		if (sc->sis_rev == SIS_REV_630S ||
1110 		    sc->sis_rev == SIS_REV_630E)
1111 			sis_read_cmos(sc, pa, (caddr_t)&sc->arpcom.ac_enaddr,
1112 			    0x9, 6);
1113 		else
1114 #endif
1115 		if (sc->sis_rev == SIS_REV_96x)
1116 			sis_read96x_mac(sc);
1117 		else if (sc->sis_rev == SIS_REV_635 ||
1118 		    sc->sis_rev == SIS_REV_630ET ||
1119 		    sc->sis_rev == SIS_REV_630EA1)
1120 			sis_read_mac(sc, pa);
1121 		else
1122 			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1123 			    SIS_EE_NODEADDR, 3, 1);
1124 		break;
1125 	}
1126 
1127 	printf(": %s, address %s\n", intrstr,
1128 	    ether_sprintf(sc->arpcom.ac_enaddr));
1129 
1130 	sc->sc_dmat = pa->pa_dmat;
1131 
1132 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sis_list_data),
1133 	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1134 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
1135 		printf(": can't alloc list mem\n");
1136 		goto fail_2;
1137 	}
1138 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1139 	    sizeof(struct sis_list_data), &sc->sc_listkva,
1140 	    BUS_DMA_NOWAIT) != 0) {
1141 		printf(": can't map list mem\n");
1142 		goto fail_2;
1143 	}
1144 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct sis_list_data), 1,
1145 	    sizeof(struct sis_list_data), 0, BUS_DMA_NOWAIT,
1146 	    &sc->sc_listmap) != 0) {
1147 		printf(": can't alloc list map\n");
1148 		goto fail_2;
1149 	}
1150 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1151 	    sizeof(struct sis_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1152 		printf(": can't load list map\n");
1153 		goto fail_2;
1154 	}
1155 	sc->sis_ldata = (struct sis_list_data *)sc->sc_listkva;
1156 
1157 	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1158 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1159 		    BUS_DMA_NOWAIT, &sc->sis_ldata->sis_rx_list[i].map) != 0) {
1160 			printf(": can't create rx map\n");
1161 			goto fail_2;
1162 		}
1163 	}
1164 
1165 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1166 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1167 		    SIS_MAXTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
1168 		    &sc->sis_ldata->sis_tx_list[i].map) != 0) {
1169 			printf(": can't create tx map\n");
1170 			goto fail_2;
1171 		}
1172 	}
1173 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, SIS_MAXTXSEGS,
1174 	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1175 		printf(": can't create tx spare map\n");
1176 		goto fail_2;
1177 	}
1178 
1179 	timeout_set(&sc->sis_timeout, sis_tick, sc);
1180 
1181 	ifp = &sc->arpcom.ac_if;
1182 	ifp->if_softc = sc;
1183 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1184 	ifp->if_ioctl = sis_ioctl;
1185 	ifp->if_start = sis_start;
1186 	ifp->if_watchdog = sis_watchdog;
1187 	ifq_init_maxlen(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
1188 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1189 	ifp->if_hardmtu = 1518; /* determined experimentally on DP83815 */
1190 
1191 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1192 
1193 	sc->sc_mii.mii_ifp = ifp;
1194 	sc->sc_mii.mii_readreg = sis_miibus_readreg;
1195 	sc->sc_mii.mii_writereg = sis_miibus_writereg;
1196 	sc->sc_mii.mii_statchg = sis_miibus_statchg;
1197 	ifmedia_init(&sc->sc_mii.mii_media, 0, sis_ifmedia_upd,sis_ifmedia_sts);
1198 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
1199 	    0);
1200 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1201 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1202 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1203 	} else
1204 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1205 
1206 	/*
1207 	 * Call MI attach routines.
1208 	 */
1209 	if_attach(ifp);
1210 	ether_ifattach(ifp);
1211 	return;
1212 
1213 fail_2:
1214 	pci_intr_disestablish(pc, sc->sc_ih);
1215 
1216 fail_1:
1217 	bus_space_unmap(sc->sis_btag, sc->sis_bhandle, size);
1218 }
1219 
1220 int
1221 sis_activate(struct device *self, int act)
1222 {
1223 	struct sis_softc *sc = (struct sis_softc *)self;
1224 	struct ifnet *ifp = &sc->arpcom.ac_if;
1225 	int rv = 0;
1226 
1227 	switch (act) {
1228 	case DVACT_SUSPEND:
1229 		if (ifp->if_flags & IFF_RUNNING)
1230 			sis_stop(sc);
1231 		rv = config_activate_children(self, act);
1232 		break;
1233 	case DVACT_RESUME:
1234 		if (ifp->if_flags & IFF_UP)
1235 			sis_init(sc);
1236 		break;
1237 	default:
1238 		rv = config_activate_children(self, act);
1239 		break;
1240 	}
1241 	return (rv);
1242 }
1243 
1244 /*
1245  * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
1246  * we arrange the descriptors in a closed ring, so that the last descriptor
1247  * points back to the first.
1248  */
1249 int
1250 sis_ring_init(struct sis_softc *sc)
1251 {
1252 	struct sis_list_data	*ld;
1253 	struct sis_ring_data	*cd;
1254 	int			i, nexti;
1255 
1256 	cd = &sc->sis_cdata;
1257 	ld = sc->sis_ldata;
1258 
1259 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1260 		if (i == (SIS_TX_LIST_CNT - 1))
1261 			nexti = 0;
1262 		else
1263 			nexti = i + 1;
1264 		ld->sis_tx_list[i].sis_nextdesc = &ld->sis_tx_list[nexti];
1265 		ld->sis_tx_list[i].sis_next =
1266 		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1267 		      offsetof(struct sis_list_data, sis_tx_list[nexti]));
1268 		ld->sis_tx_list[i].sis_mbuf = NULL;
1269 		ld->sis_tx_list[i].sis_ptr = 0;
1270 		ld->sis_tx_list[i].sis_ctl = 0;
1271 	}
1272 
1273 	cd->sis_tx_prod = cd->sis_tx_cons = cd->sis_tx_cnt = 0;
1274 
1275 	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1276 		if (i == SIS_RX_LIST_CNT - 1)
1277 			nexti = 0;
1278 		else
1279 			nexti = i + 1;
1280 		ld->sis_rx_list[i].sis_nextdesc = &ld->sis_rx_list[nexti];
1281 		ld->sis_rx_list[i].sis_next =
1282 		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1283 		      offsetof(struct sis_list_data, sis_rx_list[nexti]));
1284 		ld->sis_rx_list[i].sis_ctl = 0;
1285 	}
1286 
1287 	cd->sis_rx_prod = cd->sis_rx_cons = 0;
1288 	if_rxr_init(&cd->sis_rx_ring, 2, SIS_RX_LIST_CNT - 1);
1289 	sis_fill_rx_ring(sc);
1290 
1291 	return (0);
1292 }
1293 
1294 void
1295 sis_fill_rx_ring(struct sis_softc *sc)
1296 {
1297 	struct sis_list_data    *ld;
1298 	struct sis_ring_data    *cd;
1299 	u_int			slots;
1300 
1301 	cd = &sc->sis_cdata;
1302 	ld = sc->sis_ldata;
1303 
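	/*
	 * if_rxr_get() reserves up to SIS_RX_LIST_CNT slots from the ring
	 * accounting; any slots left unfilled are handed back through
	 * if_rxr_put() below so the counters stay balanced.
	 */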
1304 	for (slots = if_rxr_get(&cd->sis_rx_ring, SIS_RX_LIST_CNT);
1305 	    slots > 0; slots--) {
1306 		if (sis_newbuf(sc, &ld->sis_rx_list[cd->sis_rx_prod]))
1307 			break;
1308 
1309 		SIS_INC(cd->sis_rx_prod, SIS_RX_LIST_CNT);
1310 	}
1311 	if_rxr_put(&cd->sis_rx_ring, slots);
1312 }
1313 
1314 /*
1315  * Initialize an RX descriptor and attach an MBUF cluster.
1316  */
1317 int
1318 sis_newbuf(struct sis_softc *sc, struct sis_desc *c)
1319 {
1320 	struct mbuf		*m_new = NULL;
1321 
1322 	if (c == NULL)
1323 		return (EINVAL);
1324 
1325 	m_new = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1326 	if (!m_new)
1327 		return (ENOBUFS);
1328 
1329 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1330 
1331 	if (bus_dmamap_load_mbuf(sc->sc_dmat, c->map, m_new,
1332 	    BUS_DMA_NOWAIT)) {
1333 		m_free(m_new);
1334 		return (ENOBUFS);
1335 	}
1336 
1337 	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
1338 	    BUS_DMASYNC_PREREAD);
1339 
1340 	c->sis_mbuf = m_new;
1341 	c->sis_ptr = htole32(c->map->dm_segs[0].ds_addr);
1342 
1343 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1344 	    ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
1345 	    BUS_DMASYNC_PREWRITE);
1346 
1347 	c->sis_ctl = htole32(ETHER_MAX_DIX_LEN);
1348 
1349 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1350 	    ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
1351 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1352 
1353 	return (0);
1354 }
1355 
1356 /*
1357  * A frame has been uploaded: pass the resulting mbuf chain up to
1358  * the higher level protocols.
1359  */
1360 void
1361 sis_rxeof(struct sis_softc *sc)
1362 {
1363 	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
1364 	struct mbuf		*m;
1365 	struct ifnet		*ifp;
1366 	struct sis_desc		*cur_rx;
1367 	int			total_len = 0;
1368 	u_int32_t		rxstat;
1369 
1370 	ifp = &sc->arpcom.ac_if;
1371 
1372 	while (if_rxr_inuse(&sc->sis_cdata.sis_rx_ring) > 0) {
1373 		cur_rx = &sc->sis_ldata->sis_rx_list[sc->sis_cdata.sis_rx_cons];
1374 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1375 		    ((caddr_t)cur_rx - sc->sc_listkva),
1376 		    sizeof(struct sis_desc),
1377 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1378 		if (!SIS_OWNDESC(cur_rx))
1379 			break;
1380 
1381 		rxstat = letoh32(cur_rx->sis_rxstat);
1382 		m = cur_rx->sis_mbuf;
1383 		cur_rx->sis_mbuf = NULL;
1384 		total_len = SIS_RXBYTES(cur_rx);
1385 		/* from here on the buffer is consumed */
1386 		SIS_INC(sc->sis_cdata.sis_rx_cons, SIS_RX_LIST_CNT);
1387 		if_rxr_put(&sc->sis_cdata.sis_rx_ring, 1);
1388 
1389 		/*
1390 		 * DP83816A sometimes produces zero-length packets
1391 		 * shortly after initialisation.
1392 		 */
1393 		if (total_len == 0) {
1394 			m_freem(m);
1395 			continue;
1396 		}
1397 
1398 		/* The ethernet CRC is always included */
1399 		total_len -= ETHER_CRC_LEN;
1400 
1401 		/*
1402 		 * If an error occurs, update stats and drop the mbuf;
1403 		 * a fresh cluster is attached to this descriptor the
1404 		 * next time the ring is refilled.  However, don't
1405 		 * report long frames as errors, since they could be
1406 		 * VLAN-tagged packets.
1407 		 */
1408 		if (rxstat & SIS_RXSTAT_GIANT &&
1409 		    total_len <= (ETHER_MAX_DIX_LEN - ETHER_CRC_LEN))
1410 			rxstat &= ~SIS_RXSTAT_GIANT;
1411 		if (SIS_RXSTAT_ERROR(rxstat)) {
1412 			ifp->if_ierrors++;
1413 			if (rxstat & SIS_RXSTAT_COLL)
1414 				ifp->if_collisions++;
1415 			m_freem(m);
1416 			continue;
1417 		}
1418 
1419 		/* No errors; receive the packet. */
1420 		bus_dmamap_sync(sc->sc_dmat, cur_rx->map, 0,
1421 		    cur_rx->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1422 #ifdef __STRICT_ALIGNMENT
1423 		/*
1424 		 * On architectures without strict alignment requirements
1425 		 * the received cluster is passed up as-is (see the #else
1426 		 * branch), saving the expensive copy done in m_devget().
1427 		 * Here the RX buffers start longword aligned, which leaves
1428 		 * the IP header misaligned, so the packet is copied into a
1429 		 * properly aligned mbuf chain and the original cluster is
1430 		 * freed; if the copy fails, the packet is dropped.
1431 		 */
1432 		{
1433 			struct mbuf *m0;
1434 			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
1435 			m_freem(m);
1436 			if (m0 == NULL) {
1437 				ifp->if_ierrors++;
1438 				continue;
1439 			}
1440 			m = m0;
1441 		}
1442 #else
1443 		m->m_pkthdr.len = m->m_len = total_len;
1444 #endif
1445 
1446 		ml_enqueue(&ml, m);
1447 	}
1448 
1449 	if (ifiq_input(&ifp->if_rcv, &ml))
1450 		if_rxr_livelocked(&sc->sis_cdata.sis_rx_ring);
1451 
1452 	sis_fill_rx_ring(sc);
1453 }
1454 
1455 /*
1456  * A frame was downloaded to the chip. It's safe for us to clean up
1457  * the list buffers.
1458  */
1459 
1460 void
1461 sis_txeof(struct sis_softc *sc)
1462 {
1463 	struct ifnet		*ifp;
1464 	u_int32_t		idx, ctl, txstat;
1465 
1466 	ifp = &sc->arpcom.ac_if;
1467 
1468 	/*
1469 	 * Go through our tx list and free mbufs for those
1470 	 * frames that have been transmitted.
1471 	 */
1472 	for (idx = sc->sis_cdata.sis_tx_cons; sc->sis_cdata.sis_tx_cnt > 0;
1473 	    sc->sis_cdata.sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT)) {
1474 		struct sis_desc *cur_tx = &sc->sis_ldata->sis_tx_list[idx];
1475 
1476 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1477 		    ((caddr_t)cur_tx - sc->sc_listkva),
1478 		    sizeof(struct sis_desc),
1479 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1480 
1481 		if (SIS_OWNDESC(cur_tx))
1482 			break;
1483 
1484 		ctl = letoh32(cur_tx->sis_ctl);
1485 
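		/*
		 * Intermediate fragments of a multi-descriptor frame have
		 * SIS_CMDSTS_MORE set; only the final descriptor carries
		 * the completion status and the mbuf (see sis_encap()).
		 */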
1486 		if (ctl & SIS_CMDSTS_MORE)
1487 			continue;
1488 
1489 		txstat = letoh32(cur_tx->sis_txstat);
1490 
1491 		if (!(ctl & SIS_CMDSTS_PKT_OK)) {
1492 			ifp->if_oerrors++;
1493 			if (txstat & SIS_TXSTAT_EXCESSCOLLS)
1494 				ifp->if_collisions++;
1495 			if (txstat & SIS_TXSTAT_OUTOFWINCOLL)
1496 				ifp->if_collisions++;
1497 		}
1498 
1499 		ifp->if_collisions += (txstat & SIS_TXSTAT_COLLCNT) >> 16;
1500 
1501 		if (cur_tx->map->dm_nsegs != 0) {
1502 			bus_dmamap_t map = cur_tx->map;
1503 
1504 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1505 			    BUS_DMASYNC_POSTWRITE);
1506 			bus_dmamap_unload(sc->sc_dmat, map);
1507 		}
1508 		if (cur_tx->sis_mbuf != NULL) {
1509 			m_freem(cur_tx->sis_mbuf);
1510 			cur_tx->sis_mbuf = NULL;
1511 		}
1512 	}
1513 
1514 	if (idx != sc->sis_cdata.sis_tx_cons) {
1515 		/* we freed up some buffers */
1516 		sc->sis_cdata.sis_tx_cons = idx;
1517 		ifq_clr_oactive(&ifp->if_snd);
1518 	}
1519 
1520 	ifp->if_timer = (sc->sis_cdata.sis_tx_cnt == 0) ? 0 : 5;
1521 }
1522 
1523 void
1524 sis_tick(void *xsc)
1525 {
1526 	struct sis_softc	*sc = (struct sis_softc *)xsc;
1527 	struct mii_data		*mii;
1528 	int			s;
1529 
1530 	s = splnet();
1531 
1532 	mii = &sc->sc_mii;
1533 	mii_tick(mii);
1534 
1535 	if (!sc->sis_link)
1536 		sis_miibus_statchg(&sc->sc_dev);
1537 
1538 	timeout_add_sec(&sc->sis_timeout, 1);
1539 
1540 	splx(s);
1541 }
1542 
1543 int
1544 sis_intr(void *arg)
1545 {
1546 	struct sis_softc	*sc = arg;
1547 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1548 	u_int32_t		status;
1549 
1550 	if (sc->sis_stopped)	/* Most likely shared interrupt */
1551 		return (0);
1552 
1553 	/* Reading the ISR register clears all interrupts. */
1554 	status = CSR_READ_4(sc, SIS_ISR);
1555 	if ((status & SIS_INTRS) == 0)
1556 		return (0);
1557 
1558 	if (status &
1559 	    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
1560 	     SIS_ISR_TX_OK | SIS_ISR_TX_IDLE))
1561 		sis_txeof(sc);
1562 
1563 	if (status &
1564 	    (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
1565 	     SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
1566 		sis_rxeof(sc);
1567 
1568 	if (status & (SIS_ISR_RX_IDLE)) {
1569 		/* consume what's there so that sis_rx_cons points
1570 		 * to the first HW owned descriptor. */
1571 		sis_rxeof(sc);
1572 		/* reprogram the RX listptr */
1573 		CSR_WRITE_4(sc, SIS_RX_LISTPTR,
1574 		    sc->sc_listmap->dm_segs[0].ds_addr +
1575 		    offsetof(struct sis_list_data,
1576 		    sis_rx_list[sc->sis_cdata.sis_rx_cons]));
1577 	}
1578 
1579 	if (status & SIS_ISR_SYSERR)
1580 		sis_init(sc);
1581 
1582 	/*
1583 	 * XXX: Re-enable RX engine every time otherwise it occasionally
1584 	 * stops under unknown circumstances.
1585 	 */
1586 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1587 
1588 	if (!ifq_empty(&ifp->if_snd))
1589 		sis_start(ifp);
1590 
1591 	return (1);
1592 }
1593 
1594 /*
1595  * Encapsulate an mbuf chain in TX descriptors by coupling the mbuf data
1596  * pointers to the descriptor fragment pointers.
1597  */
1598 int
1599 sis_encap(struct sis_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
1600 {
1601 	struct sis_desc		*f = NULL;
1602 	bus_dmamap_t		map;
1603 	int			frag, cur, i, error;
1604 
1605 	map = sc->sc_tx_sparemap;
1606 
1607 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head,
1608 	    BUS_DMA_NOWAIT);
1609 	switch (error) {
1610 	case 0:
1611 		break;
1612 
1613 	case EFBIG:
1614 		if (m_defrag(m_head, M_DONTWAIT) == 0 &&
1615 		    bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head,
1616 		    BUS_DMA_NOWAIT) == 0)
1617 			break;
1618 
1619 		/* FALLTHROUGH */
1620 	default:
1621 		return (ENOBUFS);
1622 	}
1623 
1624 	if ((SIS_TX_LIST_CNT - (sc->sis_cdata.sis_tx_cnt + map->dm_nsegs)) < 2) {
1625 		bus_dmamap_unload(sc->sc_dmat, map);
1626 		return (ENOBUFS);
1627 	}
1628 
1629 	/*
1630  	 * Start packing the mbufs in this chain into
1631 	 * the fragment pointers. Stop when we run out
1632  	 * of fragments or hit the end of the mbuf chain.
1633 	 */
1634 	cur = frag = *txidx;
1635 
1636 	for (i = 0; i < map->dm_nsegs; i++) {
1637 		f = &sc->sis_ldata->sis_tx_list[frag];
1638 		f->sis_ctl = htole32(SIS_CMDSTS_MORE | map->dm_segs[i].ds_len);
1639 		f->sis_ptr = htole32(map->dm_segs[i].ds_addr);
1640 		if (i != 0)
1641 			f->sis_ctl |= htole32(SIS_CMDSTS_OWN);
1642 		cur = frag;
1643 		SIS_INC(frag, SIS_TX_LIST_CNT);
1644 	}
1645 
1646 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1647 	    BUS_DMASYNC_PREWRITE);
1648 
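	/*
	 * Every descriptor except the first already has SIS_CMDSTS_OWN
	 * set (see the loop above); clearing MORE on the last fragment
	 * and only now setting OWN on the first descriptor hands the
	 * whole chain to the chip in one step.
	 */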
1649 	sc->sis_ldata->sis_tx_list[cur].sis_mbuf = m_head;
1650 	sc->sis_ldata->sis_tx_list[cur].sis_ctl &= ~htole32(SIS_CMDSTS_MORE);
1651 	sc->sis_ldata->sis_tx_list[*txidx].sis_ctl |= htole32(SIS_CMDSTS_OWN);
1652 	sc->sis_cdata.sis_tx_cnt += map->dm_nsegs;
1653 	*txidx = frag;
1654 
1655 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1656 	    offsetof(struct sis_list_data, sis_tx_list[0]),
1657 	    sizeof(struct sis_desc) * SIS_TX_LIST_CNT,
1658 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1659 
1660 	return (0);
1661 }
1662 
1663 /*
1664  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1665  * to the mbuf data regions directly in the transmit lists. We also save a
1666  * copy of the pointers since the transmit list fragment pointers are
1667  * physical addresses.
1668  */
1669 
1670 void
1671 sis_start(struct ifnet *ifp)
1672 {
1673 	struct sis_softc	*sc;
1674 	struct mbuf		*m_head = NULL;
1675 	u_int32_t		idx, queued = 0;
1676 
1677 	sc = ifp->if_softc;
1678 
1679 	if (!sc->sis_link)
1680 		return;
1681 
1682 	idx = sc->sis_cdata.sis_tx_prod;
1683 
1684 	if (ifq_is_oactive(&ifp->if_snd))
1685 		return;
1686 
1687 	while(sc->sis_ldata->sis_tx_list[idx].sis_mbuf == NULL) {
1688 		m_head = ifq_deq_begin(&ifp->if_snd);
1689 		if (m_head == NULL)
1690 			break;
1691 
1692 		if (sis_encap(sc, m_head, &idx)) {
1693 			ifq_deq_rollback(&ifp->if_snd, m_head);
1694 			ifq_set_oactive(&ifp->if_snd);
1695 			break;
1696 		}
1697 
1698 		/* now we are committed to transmit the packet */
1699 		ifq_deq_commit(&ifp->if_snd, m_head);
1700 
1701 		queued++;
1702 
1703 		/*
1704 		 * If there's a BPF listener, bounce a copy of this frame
1705 		 * to him.
1706 		 */
1707 #if NBPFILTER > 0
1708 		if (ifp->if_bpf)
1709 			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
1710 #endif
1711 	}
1712 
1713 	if (queued) {
1714 		/* Transmit */
1715 		sc->sis_cdata.sis_tx_prod = idx;
1716 		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);
1717 
1718 		/*
1719 		 * Set a timeout in case the chip goes out to lunch.
1720 		 */
1721 		ifp->if_timer = 5;
1722 	}
1723 }
1724 
1725 void
1726 sis_init(void *xsc)
1727 {
1728 	struct sis_softc	*sc = (struct sis_softc *)xsc;
1729 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1730 	struct mii_data		*mii;
1731 	int			s;
1732 
1733 	s = splnet();
1734 
1735 	/*
1736 	 * Cancel pending I/O and free all RX/TX buffers.
1737 	 */
1738 	sis_stop(sc);
1739 
1740 	/*
1741 	 * Reset the chip to a known state.
1742 	 */
1743 	sis_reset(sc);
1744 
1745 #if NS_IHR_DELAY > 0
1746 	/* Configure interrupt holdoff register. */
1747 	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr == NS_SRR_16A)
1748 		CSR_WRITE_4(sc, NS_IHR, NS_IHR_VALUE);
1749 #endif
1750 
1751 	mii = &sc->sc_mii;
1752 
1753 	/* Set MAC address */
1754 	if (sc->sis_type == SIS_TYPE_83815) {
1755 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
1756 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1757 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
1758 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
1759 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1760 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
1761 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
1762 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1763 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
1764 	} else {
1765 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
1766 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1767 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
1768 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
1769 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1770 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
1771 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
1772 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1773 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
1774 	}
1775 
1776 	/* Init circular TX/RX lists. */
1777 	if (sis_ring_init(sc) != 0) {
1778 		printf("%s: initialization failed: no memory for rx buffers\n",
1779 		    sc->sc_dev.dv_xname);
1780 		sis_stop(sc);
1781 		splx(s);
1782 		return;
1783 	}
1784 
1785 	/*
1786 	 * Page 78 of the DP83815 data sheet (September 2002 version)
1787 	 * recommends the following register settings "for optimum
1788 	 * performance" for rev 15C.  The driver from NS also sets
1789 	 * the PHY_CR register for later versions.
1790 	 *
1791 	 * This resolves an issue with tons of errors in AcceptPerfectMatch
1792 	 * (non-IFF_PROMISC) mode.
1793 	 */
1794 	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
1795 		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
1796 		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
1797 		/* set val for c2 */
1798 		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
1799 		/* load/kill c2 */
1800 		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
1801 		/* raise SD off, from 4 to c */
1802 		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
1803 		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
1804 	}
1805 
1806 	/*
1807 	 * Program promiscuous mode and multicast filters.
1808 	 */
1809 	sis_iff(sc);
1810 
1811 	/*
1812 	 * Load the address of the RX and TX lists.
1813 	 */
1814 	CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
1815 	    offsetof(struct sis_list_data, sis_rx_list[0]));
1816 	CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
1817 	    offsetof(struct sis_list_data, sis_tx_list[0]));
1818 
1819 	/* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
1820 	 * the PCI bus. When this bit is set, the Max DMA Burst Size
1821 	 * for TX/RX DMA should be no larger than 16 double words.
1822 	 */
1823 	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN)
1824 		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
1825 	else
1826 		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
1827 
1828 	/* Accept Long Packets for VLAN support */
1829 	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);
1830 
1831 	/*
1832 	 * Assume 100Mbps link, actual MAC configuration is done
1833 	 * after getting a valid link.
1834 	 */
1835 	CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
1836 
1837 	/*
1838 	 * Enable interrupts.
1839 	 */
1840 	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
1841 	CSR_WRITE_4(sc, SIS_IER, 1);
1842 
1843 	/* Clear MAC disable. */
1844 	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
1845 
1846 	sc->sis_link = 0;
1847 	mii_mediachg(mii);
1848 
1849 	sc->sis_stopped = 0;
1850 	ifp->if_flags |= IFF_RUNNING;
1851 	ifq_clr_oactive(&ifp->if_snd);
1852 
1853 	splx(s);
1854 
1855 	timeout_add_sec(&sc->sis_timeout, 1);
1856 }
1857 
1858 /*
1859  * Set media options.
1860  */
1861 int
1862 sis_ifmedia_upd(struct ifnet *ifp)
1863 {
1864 	struct sis_softc	*sc;
1865 	struct mii_data		*mii;
1866 
1867 	sc = ifp->if_softc;
1868 
1869 	mii = &sc->sc_mii;
1870 	if (mii->mii_instance) {
1871 		struct mii_softc	*miisc;
1872 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1873 			mii_phy_reset(miisc);
1874 	}
1875 	mii_mediachg(mii);
1876 
1877 	return (0);
1878 }
1879 
1880 /*
1881  * Report current media status.
1882  */
1883 void
1884 sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1885 {
1886 	struct sis_softc	*sc;
1887 	struct mii_data		*mii;
1888 
1889 	sc = ifp->if_softc;
1890 
1891 	mii = &sc->sc_mii;
1892 	mii_pollstat(mii);
1893 	ifmr->ifm_active = mii->mii_media_active;
1894 	ifmr->ifm_status = mii->mii_media_status;
1895 }
1896 
1897 int
1898 sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1899 {
1900 	struct sis_softc	*sc = ifp->if_softc;
1901 	struct ifreq		*ifr = (struct ifreq *) data;
1902 	struct mii_data		*mii;
1903 	int			s, error = 0;
1904 
1905 	s = splnet();
1906 
1907 	switch(command) {
1908 	case SIOCSIFADDR:
1909 		ifp->if_flags |= IFF_UP;
1910 		if (!(ifp->if_flags & IFF_RUNNING))
1911 			sis_init(sc);
1912 		break;
1913 
1914 	case SIOCSIFFLAGS:
1915 		if (ifp->if_flags & IFF_UP) {
1916 			if (ifp->if_flags & IFF_RUNNING)
1917 				error = ENETRESET;
1918 			else
1919 				sis_init(sc);
1920 		} else {
1921 			if (ifp->if_flags & IFF_RUNNING)
1922 				sis_stop(sc);
1923 		}
1924 		break;
1925 
1926 	case SIOCGIFMEDIA:
1927 	case SIOCSIFMEDIA:
1928 		mii = &sc->sc_mii;
1929 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1930 		break;
1931 
1932 	case SIOCGIFRXR:
1933 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
1934 		    NULL, MCLBYTES, &sc->sis_cdata.sis_rx_ring);
1935 		break;
1936 
1937 	default:
1938 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1939 	}
1940 
1941 	if (error == ENETRESET) {
1942 		if (ifp->if_flags & IFF_RUNNING)
1943 			sis_iff(sc);
1944 		error = 0;
1945 	}
1946 
1947 	splx(s);
1948 	return(error);
1949 }
1950 
1951 void
1952 sis_watchdog(struct ifnet *ifp)
1953 {
1954 	struct sis_softc	*sc;
1955 	int			s;
1956 
1957 	sc = ifp->if_softc;
1958 
1959 	if (sc->sis_stopped)
1960 		return;
1961 
1962 	ifp->if_oerrors++;
1963 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1964 
1965 	s = splnet();
1966 	sis_init(sc);
1967 
1968 	if (!ifq_empty(&ifp->if_snd))
1969 		sis_start(ifp);
1970 
1971 	splx(s);
1972 }
1973 
1974 /*
1975  * Stop the adapter and free any mbufs allocated to the
1976  * RX and TX lists.
1977  */
1978 void
1979 sis_stop(struct sis_softc *sc)
1980 {
1981 	int			i;
1982 	struct ifnet		*ifp;
1983 
1984 	if (sc->sis_stopped)
1985 		return;
1986 
1987 	ifp = &sc->arpcom.ac_if;
1988 	ifp->if_timer = 0;
1989 
1990 	timeout_del(&sc->sis_timeout);
1991 
1992 	ifp->if_flags &= ~IFF_RUNNING;
1993 	ifq_clr_oactive(&ifp->if_snd);
1994 	sc->sis_stopped = 1;
1995 
1996 	CSR_WRITE_4(sc, SIS_IER, 0);
1997 	CSR_WRITE_4(sc, SIS_IMR, 0);
1998 	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
1999 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
2000 	DELAY(1000);
2001 	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
2002 	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);
2003 
2004 	sc->sis_link = 0;
2005 
2006 	/*
2007 	 * Free data in the RX lists.
2008 	 */
2009 	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
2010 		if (sc->sis_ldata->sis_rx_list[i].map->dm_nsegs != 0) {
2011 			bus_dmamap_t map = sc->sis_ldata->sis_rx_list[i].map;
2012 
2013 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2014 			    BUS_DMASYNC_POSTREAD);
2015 			bus_dmamap_unload(sc->sc_dmat, map);
2016 		}
2017 		if (sc->sis_ldata->sis_rx_list[i].sis_mbuf != NULL) {
2018 			m_freem(sc->sis_ldata->sis_rx_list[i].sis_mbuf);
2019 			sc->sis_ldata->sis_rx_list[i].sis_mbuf = NULL;
2020 		}
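		/*
		 * Clear the descriptor but preserve the DMA map handle;
		 * the size arithmetic relies on the bus_dmamap_t map
		 * being the last member of struct sis_desc.
		 */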
2021 		bzero(&sc->sis_ldata->sis_rx_list[i],
2022 		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
2023 	}
2024 
2025 	/*
2026 	 * Free the TX list buffers.
2027 	 */
2028 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
2029 		if (sc->sis_ldata->sis_tx_list[i].map->dm_nsegs != 0) {
2030 			bus_dmamap_t map = sc->sis_ldata->sis_tx_list[i].map;
2031 
2032 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2033 			    BUS_DMASYNC_POSTWRITE);
2034 			bus_dmamap_unload(sc->sc_dmat, map);
2035 		}
2036 		if (sc->sis_ldata->sis_tx_list[i].sis_mbuf != NULL) {
2037 			m_freem(sc->sis_ldata->sis_tx_list[i].sis_mbuf);
2038 			sc->sis_ldata->sis_tx_list[i].sis_mbuf = NULL;
2039 		}
2040 		bzero(&sc->sis_ldata->sis_tx_list[i],
2041 		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
2042 	}
2043 }
2044