xref: /openbsd-src/sys/dev/pci/if_sk.c (revision 3a3fbb3f2e2521ab7c4a56b7ff7462ebd9095ec5)
1 /*	$OpenBSD: if_sk.c,v 1.19 2001/11/06 19:53:19 miod Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999, 2000
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
35  */
36 
37 /*
38  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
39  * the SK-984x series adapters, both single port and dual port.
40  * References:
41  * 	The XaQti XMAC II datasheet,
42  * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
43  *	The SysKonnect GEnesis manual, http://www.syskonnect.com
44  *
 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
 * convenience to others until Vitesse corrects this problem:
48  *
49  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
50  *
51  * Written by Bill Paul <wpaul@ee.columbia.edu>
52  * Department of Electrical Engineering
53  * Columbia University, New York City
54  */
55 
56 /*
57  * The SysKonnect gigabit ethernet adapters consist of two main
58  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
59  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
60  * components and a PHY while the GEnesis controller provides a PCI
61  * interface with DMA support. Each card may have between 512K and
62  * 2MB of SRAM on board depending on the configuration.
63  *
64  * The SysKonnect GEnesis controller can have either one or two XMAC
65  * chips connected to it, allowing single or dual port NIC configurations.
66  * SysKonnect has the distinction of being the only vendor on the market
67  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
68  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
69  * XMAC registers. This driver takes advantage of these features to allow
70  * both XMACs to operate as independent interfaces.
71  */
72 
73 #include "bpfilter.h"
74 
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/sockio.h>
78 #include <sys/mbuf.h>
79 #include <sys/malloc.h>
80 #include <sys/kernel.h>
81 #include <sys/socket.h>
82 #include <sys/device.h>
83 #include <sys/queue.h>
84 
85 #include <net/if.h>
86 #include <net/if_dl.h>
87 #include <net/if_types.h>
88 
89 #ifdef INET
90 #include <netinet/in.h>
91 #include <netinet/in_systm.h>
92 #include <netinet/in_var.h>
93 #include <netinet/ip.h>
94 #include <netinet/if_ether.h>
95 #endif
96 
97 #include <net/if_media.h>
98 
99 #if NBPFILTER > 0
100 #include <net/bpf.h>
101 #endif
102 
103 #include <uvm/uvm_extern.h>              /* for vtophys */
104 #include <machine/bus.h>
105 
106 #include <dev/mii/mii.h>
107 #include <dev/mii/miivar.h>
108 #include <dev/mii/brgphyreg.h>
109 
110 #include <dev/pci/pcireg.h>
111 #include <dev/pci/pcivar.h>
112 #include <dev/pci/pcidevs.h>
113 
114 #define SK_USEIOSPACE
115 #define	SK_VERBOSE
116 
117 #include <dev/pci/if_skreg.h>
118 #include <dev/pci/xmaciireg.h>
119 
120 int skc_probe		__P((struct device *, void *, void *));
121 void skc_attach		__P((struct device *, struct device *self, void *aux));
122 int sk_probe		__P((struct device *, void *, void *));
123 void sk_attach		__P((struct device *, struct device *self, void *aux));
124 int skcprint		__P((void *, const char *));
125 int sk_attach_xmac	__P((struct sk_softc *, int));
126 int sk_intr		__P((void *));
127 void sk_intr_bcom	__P((struct sk_if_softc *));
128 void sk_intr_xmac	__P((struct sk_if_softc *));
129 void sk_rxeof		__P((struct sk_if_softc *));
130 void sk_txeof		__P((struct sk_if_softc *));
131 int sk_encap		__P((struct sk_if_softc *, struct mbuf *, u_int32_t *));
132 void sk_start		__P((struct ifnet *));
133 int sk_ioctl		__P((struct ifnet *, u_long, caddr_t));
134 void sk_init		__P((void *));
135 void sk_init_xmac	__P((struct sk_if_softc *));
136 void sk_stop		__P((struct sk_if_softc *));
137 void sk_watchdog	__P((struct ifnet *));
138 void sk_shutdown	__P((void *));
139 int sk_ifmedia_upd	__P((struct ifnet *));
140 void sk_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
141 void sk_reset		__P((struct sk_softc *));
142 int sk_newbuf		__P((struct sk_if_softc *, struct sk_chain *,
143     struct mbuf *));
144 int sk_init_rx_ring	__P((struct sk_if_softc *));
145 void sk_init_tx_ring	__P((struct sk_if_softc *));
146 u_int32_t sk_win_read_4	__P((struct sk_softc *, int));
147 u_int16_t sk_win_read_2	__P((struct sk_softc *, int));
148 u_int8_t sk_win_read_1	__P((struct sk_softc *, int));
149 void sk_win_write_4	__P((struct sk_softc *, int, u_int32_t));
150 void sk_win_write_2	__P((struct sk_softc *, int, u_int32_t));
151 void sk_win_write_1	__P((struct sk_softc *, int, u_int32_t));
152 u_int8_t sk_vpd_readbyte	__P((struct sk_softc *, int));
153 void sk_vpd_read_res	__P((struct sk_softc *,
154 					struct vpd_res *, int));
155 void sk_vpd_read	__P((struct sk_softc *));
156 
157 int sk_miibus_readreg	__P((struct device *, int, int));
158 void sk_miibus_writereg	__P((struct device *, int, int, int));
159 void sk_miibus_statchg	__P((struct device *));
160 
161 u_int32_t sk_calchash	__P((caddr_t));
162 void sk_setfilt		__P((struct sk_if_softc *, caddr_t, int));
163 void sk_setmulti	__P((struct sk_if_softc *));
164 void sk_tick		__P((void *));
165 
166 #define SK_SETBIT(sc, reg, x)		\
167 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
168 
169 #define SK_CLRBIT(sc, reg, x)		\
170 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
171 
172 #define SK_WIN_SETBIT_4(sc, reg, x)	\
173 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
174 
175 #define SK_WIN_CLRBIT_4(sc, reg, x)	\
176 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
177 
178 #define SK_WIN_SETBIT_2(sc, reg, x)	\
179 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
180 
181 #define SK_WIN_CLRBIT_2(sc, reg, x)	\
182 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
183 
184 u_int32_t sk_win_read_4(sc, reg)
185 	struct sk_softc		*sc;
186 	int			reg;
187 {
188 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
189 	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
190 }
191 
192 u_int16_t sk_win_read_2(sc, reg)
193 	struct sk_softc		*sc;
194 	int			reg;
195 {
196 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
197 	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
198 }
199 
200 u_int8_t sk_win_read_1(sc, reg)
201 	struct sk_softc		*sc;
202 	int			reg;
203 {
204 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
205 	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
206 }
207 
208 void sk_win_write_4(sc, reg, val)
209 	struct sk_softc		*sc;
210 	int			reg;
211 	u_int32_t		val;
212 {
213 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
214 	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
215 	return;
216 }
217 
218 void sk_win_write_2(sc, reg, val)
219 	struct sk_softc		*sc;
220 	int			reg;
221 	u_int32_t		val;
222 {
223 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
224 	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), (u_int32_t)val);
225 	return;
226 }
227 
228 void sk_win_write_1(sc, reg, val)
229 	struct sk_softc		*sc;
230 	int			reg;
231 	u_int32_t		val;
232 {
233 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
234 	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
235 	return;
236 }
237 
238 /*
239  * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification. The VPD data is separated into areas
241  * denoted by resource IDs. The SysKonnect VPD contains an ID string
242  * resource (the name of the adapter), a read-only area resource
243  * containing various key/data fields and a read/write area which
244  * can be used to store asset management information or log messages.
245  * We read the ID string and read-only into buffers attached to
246  * the controller softc structure for later use. At the moment,
247  * we only use the ID string during sk_attach().
248  */
249 u_int8_t sk_vpd_readbyte(sc, addr)
250 	struct sk_softc		*sc;
251 	int			addr;
252 {
253 	int			i;
254 
255 	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
256 	for (i = 0; i < SK_TIMEOUT; i++) {
257 		DELAY(1);
258 		if (sk_win_read_2(sc,
259 		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
260 			break;
261 	}
262 
263 	if (i == SK_TIMEOUT)
264 		return(0);
265 
266 	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
267 }
268 
269 void sk_vpd_read_res(sc, res, addr)
270 	struct sk_softc		*sc;
271 	struct vpd_res		*res;
272 	int			addr;
273 {
274 	int			i;
275 	u_int8_t		*ptr;
276 
277 	ptr = (u_int8_t *)res;
278 	for (i = 0; i < sizeof(struct vpd_res); i++)
279 		ptr[i] = sk_vpd_readbyte(sc, i + addr);
280 
281 	return;
282 }
283 
284 void sk_vpd_read(sc)
285 	struct sk_softc		*sc;
286 {
287 	int			pos = 0, i;
288 	struct vpd_res		res;
289 
290 	if (sc->sk_vpd_prodname != NULL)
291 		free(sc->sk_vpd_prodname, M_DEVBUF);
292 	if (sc->sk_vpd_readonly != NULL)
293 		free(sc->sk_vpd_readonly, M_DEVBUF);
294 	sc->sk_vpd_prodname = NULL;
295 	sc->sk_vpd_readonly = NULL;
296 
297 	sk_vpd_read_res(sc, &res, pos);
298 
299 	if (res.vr_id != VPD_RES_ID) {
300 		printf("%s: bad VPD resource id: expected %x got %x\n",
301 		    sc->sk_dev.dv_xname, VPD_RES_ID, res.vr_id);
302 		return;
303 	}
304 
305 	pos += sizeof(res);
306 	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
307 	for (i = 0; i < res.vr_len; i++)
308 		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
309 	sc->sk_vpd_prodname[i] = '\0';
310 	pos += i;
311 
312 	sk_vpd_read_res(sc, &res, pos);
313 
314 	if (res.vr_id != VPD_RES_READ) {
315 		printf("%s: bad VPD resource id: expected %x got %x\n",
316 		    sc->sk_dev.dv_xname, VPD_RES_READ, res.vr_id);
317 		return;
318 	}
319 
320 	pos += sizeof(res);
321 	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
322 	for (i = 0; i < res.vr_len + 1; i++)
323 		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
324 
325 	return;
326 }
327 
/*
 * MII read callback: read PHY register 'reg' on PHY address 'phy'
 * through the XMAC's PHY access registers.  Returns the register
 * value, or 0 on timeout / unsupported PHY address.
 */
int
sk_miibus_readreg(dev, phy, reg)
	struct device *dev;
	int phy, reg;
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	/* The XMAC's internal PHY only exists at address 0. */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	/* Select the PHY/register, then issue a read whose value is
	 * discarded -- presumably this starts the access cycle;
	 * TODO confirm against the XMAC II datasheet. */
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		/* External PHY: poll until the MMU reports data ready. */
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("%s: phy failed to come ready\n",
			    sc_if->sk_dev.dv_xname);
			return(0);
		}
	}
	DELAY(1);
	return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
}
358 
/*
 * MII write callback: write 'val' to PHY register 'reg' on PHY
 * address 'phy' through the XMAC's PHY access registers.  Waits for
 * the PHY-busy bit to clear both before and after the data write;
 * timeouts are reported but otherwise ignored.
 */
void
sk_miibus_writereg(dev, phy, reg, val)
	struct device *dev;
	int phy, reg, val;
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/* NOTE(review): this first poll loop has no DELAY(1) between
	 * iterations, unlike the one below -- confirm intentional. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return;
	}

	/* Write the data, then wait for the access to complete. */
	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);

	return;
}
391 
392 void
393 sk_miibus_statchg(dev)
394 	struct device *dev;
395 {
396 	struct sk_if_softc *sc_if;
397 	struct mii_data *mii;
398 
399 	sc_if = (struct sk_if_softc *)dev;
400 	mii = &sc_if->sk_mii;
401 
402 	/*
403 	 * If this is a GMII PHY, manually set the XMAC's
404 	 * duplex mode accordingly.
405 	 */
406 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
407 		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
408 			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
409 		} else {
410 			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
411 		}
412 	}
413 
414 	return;
415 }
416 
417 #define SK_POLY		0xEDB88320
418 #define SK_BITS		6
419 
420 u_int32_t sk_calchash(addr)
421 	caddr_t			addr;
422 {
423 	u_int32_t		idx, bit, data, crc;
424 
425 	/* Compute CRC for the address value. */
426 	crc = 0xFFFFFFFF; /* initial value */
427 
428 	for (idx = 0; idx < 6; idx++) {
429 		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
430 			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? SK_POLY : 0);
431 	}
432 
433 	return (~crc & ((1 << SK_BITS) - 1));
434 }
435 
/*
 * Program one entry of the XMAC's perfect (exact-match) receive
 * filter with the 6-byte ethernet address at 'addr'.  The address is
 * written 16 bits at a time into three consecutive filter registers.
 * NOTE(review): the u_int16_t loads assume 'addr' is at least 2-byte
 * aligned and rely on host byte order matching what the XMAC expects
 * -- confirm against the XMAC II datasheet.
 */
void sk_setfilt(sc_if, addr, slot)
	struct sk_if_softc	*sc_if;
	caddr_t			addr;
	int			slot;
{
	int			base;

	base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));

	return;
}
451 
/*
 * Reprogram the multicast receive filter.  The first XM_RXFILT_MAX-1
 * groups go into the XMAC's perfect filter (slot 0 is left for the
 * station address); any additional groups fall back to the 64-bit
 * hash table.  A multicast address *range* cannot be expressed in
 * either filter, so encountering one forces IFF_ALLMULTI and a restart
 * of the programming pass via the backward goto.
 */
void
sk_setmulti(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ifnet *ifp;
	u_int32_t hashes[2] = { 0, 0 };
	int h, i;
	struct arpcom *ac = &sc_if->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };

	ifp = &sc_if->arpcom.ac_if;

	/* First, zot all the existing filters. */
	for (i = 1; i < XM_RXFILT_MAX; i++)
		sk_setfilt(sc_if, (caddr_t)&dummy, i);
	SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
	SK_XM_WRITE_4(sc_if, XM_MAR2, 0);

	/* Now program new ones. */
allmulti:
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept everything: saturate the hash table. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		/* First find the tail of the list. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* A range of addresses can't be filtered exactly:
			 * fall back to ALLMULTI and start over. */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
				ifp->if_flags |= IFF_ALLMULTI;
				goto allmulti;
			}
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if, enm->enm_addrlo, i);
				i++;
			}
			else {
				/* 6-bit hash selects one of 64 bits split
				 * across the two 32-bit MAR registers. */
				h = sk_calchash(enm->enm_addrlo);
				if (h < 32)
					hashes[0] |= (1 << h);
				else
					hashes[1] |= (1 << (h - 32));
			}

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Enable both filter mechanisms and load the hash table. */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
	    XM_MODE_RX_USE_PERFECT);
	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);

	return;
}
514 
515 int sk_init_rx_ring(sc_if)
516 	struct sk_if_softc	*sc_if;
517 {
518 	struct sk_chain_data	*cd;
519 	struct sk_ring_data	*rd;
520 	int			i;
521 
522 	cd = &sc_if->sk_cdata;
523 	rd = sc_if->sk_rdata;
524 
525 	bzero((char *)rd->sk_rx_ring,
526 	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
527 
528 	for (i = 0; i < SK_RX_RING_CNT; i++) {
529 		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
530 		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS) {
531 			printf("%s: failed alloc of %dth mbuf\n",
532 			    sc_if->sk_dev.dv_xname, i);
533 			return(ENOBUFS);
534 		}
535 		if (i == (SK_RX_RING_CNT - 1)) {
536 			cd->sk_rx_chain[i].sk_next =
537 			    &cd->sk_rx_chain[0];
538 			rd->sk_rx_ring[i].sk_next =
539 			    vtophys(&rd->sk_rx_ring[0]);
540 		} else {
541 			cd->sk_rx_chain[i].sk_next =
542 			    &cd->sk_rx_chain[i + 1];
543 			rd->sk_rx_ring[i].sk_next =
544 			    vtophys(&rd->sk_rx_ring[i + 1]);
545 		}
546 	}
547 
548 	sc_if->sk_cdata.sk_rx_prod = 0;
549 	sc_if->sk_cdata.sk_rx_cons = 0;
550 
551 	return(0);
552 }
553 
554 void sk_init_tx_ring(sc_if)
555 	struct sk_if_softc	*sc_if;
556 {
557 	struct sk_chain_data	*cd;
558 	struct sk_ring_data	*rd;
559 	int			i;
560 
561 	cd = &sc_if->sk_cdata;
562 	rd = sc_if->sk_rdata;
563 
564 	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
565 	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
566 
567 	for (i = 0; i < SK_TX_RING_CNT; i++) {
568 		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
569 		if (i == (SK_TX_RING_CNT - 1)) {
570 			cd->sk_tx_chain[i].sk_next =
571 			    &cd->sk_tx_chain[0];
572 			rd->sk_tx_ring[i].sk_next =
573 			    vtophys(&rd->sk_tx_ring[0]);
574 		} else {
575 			cd->sk_tx_chain[i].sk_next =
576 			    &cd->sk_tx_chain[i + 1];
577 			rd->sk_tx_ring[i].sk_next =
578 			    vtophys(&rd->sk_tx_ring[i + 1]);
579 		}
580 	}
581 
582 	sc_if->sk_cdata.sk_tx_prod = 0;
583 	sc_if->sk_cdata.sk_tx_cons = 0;
584 	sc_if->sk_cdata.sk_tx_cnt = 0;
585 
586 	return;
587 }
588 
/*
 * Attach an mbuf cluster to the given RX chain entry and point its
 * hardware descriptor at the cluster's physical address.  If 'm' is
 * NULL a fresh mbuf + cluster is allocated; otherwise the caller's
 * mbuf is recycled in place.  Returns ENOBUFS on allocation failure,
 * 0 on success.
 */
int sk_newbuf(sc_if, c, m)
	struct sk_if_softc	*sc_if;
	struct sk_chain		*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct sk_rx_desc	*r;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("%s: no memory for rx list -- "
			    "packet dropped!\n", sc_if->sk_dev.dv_xname);
			return(ENOBUFS);
		}

		/* Attach a standard mbuf cluster (MCLBYTES). */
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
	} else {
		/*
	 	 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	/* Hand the buffer's physical address to the descriptor. */
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
	r->sk_ctl = m_new->m_len | SK_RXSTAT;

	return(0);
}
636 
637 /*
638  * Set media options.
639  */
640 int
641 sk_ifmedia_upd(ifp)
642 	struct ifnet *ifp;
643 {
644 	struct sk_if_softc *sc_if = ifp->if_softc;
645 
646 	sk_init(sc_if);
647 	mii_mediachg(&sc_if->sk_mii);
648 	return(0);
649 }
650 
651 /*
652  * Report current media status.
653  */
654 void
655 sk_ifmedia_sts(ifp, ifmr)
656 	struct ifnet *ifp;
657 	struct ifmediareq *ifmr;
658 {
659 	struct sk_if_softc *sc_if = ifp->if_softc;
660 
661 	mii_pollstat(&sc_if->sk_mii);
662 	ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
663 	ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
664 }
665 
/*
 * Interface ioctl handler.  Runs at splimp() to keep the interrupt
 * path out while interface state changes.  Generic ethernet ioctls
 * are delegated to ether_ioctl() first; a positive return from it is
 * a final error.
 */
int
sk_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splimp();

	if ((error = ether_ioctl(ifp, &sc_if->arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}

	switch(command) {
	case SIOCSIFADDR:
		/* Setting an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			sk_init(sc_if);
			arp_ifinit(&sc_if->arpcom, ifa);
			break;
#endif /* INET */
		default:
			sk_init(sc_if);
			break;
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* If only the promiscuous bit changed, toggle it in
			 * the XMAC directly instead of a full reinit. */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
				SK_XM_SETBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc_if->sk_if_flags & IFF_PROMISC) {
				SK_XM_CLRBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else
				sk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember the flags so the next SIOCSIFFLAGS can diff. */
		sc_if->sk_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc_if->arpcom) :
		    ether_delmulti(ifr, &sc_if->arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			sk_setmulti(sc_if);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc_if->sk_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		/* NOTE(review): ENOTTY is the conventional errno for an
		 * unrecognized ioctl; EINVAL kept for compatibility. */
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return(error);
}
752 
753 /*
754  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
755  * IDs against our list and return a device name if we find a match.
756  */
757 int
758 skc_probe(parent, match, aux)
759 	struct device *parent;
760 	void *match, *aux;
761 {
762 	struct pci_attach_args *pa = aux;
763 
764 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SCHNEIDERKOCH &&
765 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SCHNEIDERKOCH_GE)
766 		return (1);
767 
768 	return (0);
769 }
770 
771 /*
772  * Force the GEnesis into reset, then bring it out of reset.
773  */
/*
 * Force the GEnesis into reset, then bring it out of reset.
 * Afterwards, re-enable the packet arbiter and RAM interface and set
 * up interrupt moderation.  The register write order follows the
 * hardware's documented reset sequence; do not reorder.
 */
void sk_reset(sc)
	struct sk_softc		*sc;
{
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_RESET);
	DELAY(1000);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_UNRESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_UNRESET);

	/* Configure packet arbiter */
	sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
	sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
         * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 */
        sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
        sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
        sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
808 
809 int
810 sk_probe(parent, match, aux)
811 	struct device *parent;
812 	void *match, *aux;
813 {
814 	struct skc_attach_args *sa = aux;
815 
816 	if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
817 		return(0);
818 
819 	return (1);
820 }
821 
822 /*
823  * Each XMAC chip is attached as a separate logical IP interface.
824  * Single port cards will have only one logical interface of course.
825  */
826 void
827 sk_attach(parent, self, aux)
828 	struct device *parent, *self;
829 	void *aux;
830 {
831 	struct sk_if_softc *sc_if = (struct sk_if_softc *) self;
832 	struct sk_softc *sc = (struct sk_softc *)parent;
833 	struct skc_attach_args *sa = aux;
834 	struct ifnet *ifp;
835 	caddr_t kva;
836 	bus_dma_segment_t seg;
837 	bus_dmamap_t dmamap;
838 	int i, rseg;
839 
840 	sc_if->sk_port = sa->skc_port;
841 	sc_if->sk_softc = sc;
842 	sc->sk_if[sa->skc_port] = sc_if;
843 
844 	if (sa->skc_port == SK_PORT_A)
845 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
846 	if (sa->skc_port == SK_PORT_B)
847 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
848 
849 	/*
850 	 * Get station address for this interface. Note that
851 	 * dual port cards actually come with three station
852 	 * addresses: one for each port, plus an extra. The
853 	 * extra one is used by the SysKonnect driver software
854 	 * as a 'virtual' station address for when both ports
855 	 * are operating in failover mode. Currently we don't
856 	 * use this extra address.
857 	 */
858 	for (i = 0; i < ETHER_ADDR_LEN; i++)
859 		sc_if->arpcom.ac_enaddr[i] =
860 		    sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);
861 
862 
863 	printf(": address %s\n",
864 	    ether_sprintf(sc_if->arpcom.ac_enaddr));
865 
866 	/*
867 	 * Set up RAM buffer addresses. The NIC will have a certain
868 	 * amount of SRAM on it, somewhere between 512K and 2MB. We
869 	 * need to divide this up a) between the transmitter and
870  	 * receiver and b) between the two XMACs, if this is a
871 	 * dual port NIC. Our algotithm is to divide up the memory
872 	 * evenly so that everyone gets a fair share.
873 	 */
874 	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
875 		u_int32_t		chunk, val;
876 
877 		chunk = sc->sk_ramsize / 2;
878 		val = sc->sk_rboff / sizeof(u_int64_t);
879 		sc_if->sk_rx_ramstart = val;
880 		val += (chunk / sizeof(u_int64_t));
881 		sc_if->sk_rx_ramend = val - 1;
882 		sc_if->sk_tx_ramstart = val;
883 		val += (chunk / sizeof(u_int64_t));
884 		sc_if->sk_tx_ramend = val - 1;
885 	} else {
886 		u_int32_t		chunk, val;
887 
888 		chunk = sc->sk_ramsize / 4;
889 		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
890 		    sizeof(u_int64_t);
891 		sc_if->sk_rx_ramstart = val;
892 		val += (chunk / sizeof(u_int64_t));
893 		sc_if->sk_rx_ramend = val - 1;
894 		sc_if->sk_tx_ramstart = val;
895 		val += (chunk / sizeof(u_int64_t));
896 		sc_if->sk_tx_ramend = val - 1;
897 	}
898 
899 	/* Read and save PHY type and set PHY address */
900 	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
901 	switch (sc_if->sk_phytype) {
902 	case SK_PHYTYPE_XMAC:
903 		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
904 		break;
905 	case SK_PHYTYPE_BCOM:
906 		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
907 		break;
908 	default:
909 		printf("%s: unsupported PHY type: %d\n",
910 		    sc->sk_dev.dv_xname, sc_if->sk_phytype);
911 		return;
912 	}
913 
914 	/* Allocate the descriptor queues. */
915 	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct sk_ring_data),
916 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
917 		printf("%s: can't alloc rx buffers\n", sc->sk_dev.dv_xname);
918 		goto fail;
919 	}
920 	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
921 	    sizeof(struct sk_ring_data), &kva, BUS_DMA_NOWAIT)) {
922 		printf("%s: can't map dma buffers (%d bytes)\n",
923 		       sc_if->sk_dev.dv_xname, sizeof(struct sk_ring_data));
924 		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
925 		goto fail;
926 	}
927 	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct sk_ring_data), 1,
928 	    sizeof(struct sk_ring_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
929 		printf("%s: can't create dma map\n", sc_if->sk_dev.dv_xname);
930 		bus_dmamem_unmap(sc->sc_dmatag, kva,
931 		    sizeof(struct sk_ring_data));
932 		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
933 		goto fail;
934 	}
935 	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
936 	    sizeof(struct sk_ring_data), NULL, BUS_DMA_NOWAIT)) {
937 		printf("%s: can't load dma map\n", sc_if->sk_dev.dv_xname);
938 		bus_dmamap_destroy(sc->sc_dmatag, dmamap);
939 		bus_dmamem_unmap(sc->sc_dmatag, kva,
940 		    sizeof(struct sk_ring_data));
941 		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
942 		goto fail;
943 	}
944         sc_if->sk_rdata = (struct sk_ring_data *)kva;
945 	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
946 
947 	ifp = &sc_if->arpcom.ac_if;
948 	ifp->if_softc = sc_if;
949 	ifp->if_mtu = ETHERMTU;
950 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
951 	ifp->if_ioctl = sk_ioctl;
952 	ifp->if_output = ether_output;
953 	ifp->if_start = sk_start;
954 	ifp->if_watchdog = sk_watchdog;
955 	ifp->if_baudrate = 1000000000;
956 	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
957 	bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
958 
959 	/*
960 	 * Do miibus setup.
961 	 */
962 	sk_init_xmac(sc_if);
963 	sc_if->sk_mii.mii_ifp = ifp;
964 	sc_if->sk_mii.mii_readreg = sk_miibus_readreg;
965 	sc_if->sk_mii.mii_writereg = sk_miibus_writereg;
966 	sc_if->sk_mii.mii_statchg = sk_miibus_statchg;
967 	ifmedia_init(&sc_if->sk_mii.mii_media, 0,
968 	    sk_ifmedia_upd, sk_ifmedia_sts);
969 	mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
970 	    MII_OFFSET_ANY, 0);
971 	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
972 		printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
973 		ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
974 		    0, NULL);
975 		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
976 	}
977 	else
978 		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);
979 
980 	timeout_set(&sc_if->sk_tick_ch, sk_tick, sc_if);
981 	timeout_add(&sc_if->sk_tick_ch, hz);
982 
983 	/*
984 	 * Call MI attach routines.
985 	 */
986 	if_attach(ifp);
987 	ether_ifattach(ifp);
988 
989 	return;
990 
991 fail:
992 	sc->sk_if[sa->skc_port] = NULL;
993 }
994 
995 int
996 skcprint(aux, pnp)
997 	void *aux;
998 	const char *pnp;
999 {
1000 	struct skc_attach_args *sa = aux;
1001 
1002 	if (pnp)
1003 		printf("sk port %c at %s",
1004 		    (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
1005 	else
1006 		printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
1007 	return (UNCONF);
1008 }
1009 
1010 /*
1011  * Attach the interface. Allocate softc structures, do ifmedia
1012  * setup and ethernet/BPF attach.
1013  */
1014 void
1015 skc_attach(parent, self, aux)
1016 	struct device *parent, *self;
1017 	void *aux;
1018 {
1019 	struct sk_softc *sc = (struct sk_softc *)self;
1020 	struct pci_attach_args *pa = aux;
1021 	struct skc_attach_args skca;
1022 	pci_chipset_tag_t pc = pa->pa_pc;
1023 	pci_intr_handle_t ih;
1024 	const char *intrstr = NULL;
1025 	bus_addr_t iobase;
1026 	bus_size_t iosize;
1027 	int s;
1028 	u_int32_t command;
1029 
1030 	s = splimp();
1031 
1032 	/*
1033 	 * Handle power management nonsense.
1034 	 */
1035 	command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF;
1036 	if (command == 0x01) {
1037 
1038 		command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL);
1039 		if (command & SK_PSTATE_MASK) {
1040 			u_int32_t		iobase, membase, irq;
1041 
1042 			/* Save important PCI config data. */
1043 			iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO);
1044 			membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM);
1045 			irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE);
1046 
1047 			/* Reset the power state. */
1048 			printf("%s chip is in D%d power mode "
1049 			    "-- setting to D0\n", sc->sk_dev.dv_xname,
1050 			    command & SK_PSTATE_MASK);
1051 			command &= 0xFFFFFFFC;
1052 			pci_conf_write(pc, pa->pa_tag,
1053 			    SK_PCI_PWRMGMTCTRL, command);
1054 
1055 			/* Restore PCI config data. */
1056 			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase);
1057 			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase);
1058 			pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq);
1059 		}
1060 	}
1061 
1062 	/*
1063 	 * Map control/status registers.
1064 	 */
1065 	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1066 	command |= PCI_COMMAND_IO_ENABLE |
1067 	    PCI_COMMAND_MEM_ENABLE |
1068 	    PCI_COMMAND_MASTER_ENABLE;
1069 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
1070 	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1071 
1072 #ifdef SK_USEIOSPACE
1073 	if (!(command & PCI_COMMAND_IO_ENABLE)) {
1074 		printf(": failed to enable I/O ports!\n");
1075 		goto fail;
1076 	}
1077 	/*
1078 	 * Map control/status registers.
1079 	 */
1080 	if (pci_io_find(pc, pa->pa_tag, SK_PCI_LOIO, &iobase, &iosize)) {
1081 		printf(": can't find i/o space\n");
1082 		goto fail;
1083 	}
1084 	if (bus_space_map(pa->pa_iot, iobase, iosize, 0, &sc->sk_bhandle)) {
1085 		printf(": can't map i/o space\n");
1086 		goto fail;
1087 	}
1088 	sc->sk_btag = pa->pa_iot;
1089 #else
1090 	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
1091 		printf(": failed to enable memory mapping!\n");
1092 		goto fail;
1093 	}
1094 	if (pci_mem_find(pc, pa->pa_tag, SK_PCI_LOMEM, &iobase, &iosize, NULL)){
1095 		printf(": can't find mem space\n");
1096 		goto fail;
1097 	}
1098 	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->sk_bhandle)) {
1099 		printf(": can't map mem space\n");
1100 		goto fail;
1101 	}
1102 	sc->sk_btag = pa->pa_memt;
1103 #endif
1104 	sc->sc_dmatag = pa->pa_dmat;
1105 
1106 	/* Allocate interrupt */
1107 	if (pci_intr_map(pa, &ih)) {
1108 		printf(": couldn't map interrupt\n");
1109 		goto fail;
1110 	}
1111 
1112 	intrstr = pci_intr_string(pc, ih);
1113 	sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, sk_intr, sc,
1114 	    self->dv_xname);
1115 	if (sc->sk_intrhand == NULL) {
1116 		printf(": couldn't establish interrupt");
1117 		if (intrstr != NULL)
1118 			printf(" at %s", intrstr);
1119 		goto fail;
1120 	}
1121 	printf(": %s\n", intrstr);
1122 
1123 	/* Reset the adapter. */
1124 	sk_reset(sc);
1125 
1126 	/* Read and save vital product data from EEPROM. */
1127 	sk_vpd_read(sc);
1128 
1129 	/* Read and save RAM size and RAMbuffer offset */
1130 	switch(sk_win_read_1(sc, SK_EPROM0)) {
1131 	case SK_RAMSIZE_512K_64:
1132 		sc->sk_ramsize = 0x80000;
1133 		sc->sk_rboff = SK_RBOFF_0;
1134 		break;
1135 	case SK_RAMSIZE_1024K_64:
1136 		sc->sk_ramsize = 0x100000;
1137 		sc->sk_rboff = SK_RBOFF_80000;
1138 		break;
1139 	case SK_RAMSIZE_1024K_128:
1140 		sc->sk_ramsize = 0x100000;
1141 		sc->sk_rboff = SK_RBOFF_0;
1142 		break;
1143 	case SK_RAMSIZE_2048K_128:
1144 		sc->sk_ramsize = 0x200000;
1145 		sc->sk_rboff = SK_RBOFF_0;
1146 		break;
1147 	default:
1148 		printf("%s: unknown ram size: %d\n",
1149 		    sc->sk_dev.dv_xname, sk_win_read_1(sc, SK_EPROM0));
1150 		goto fail;
1151 		break;
1152 	}
1153 
1154 	/* Read and save physical media type */
1155 	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1156 	case SK_PMD_1000BASESX:
1157 		sc->sk_pmd = IFM_1000_SX;
1158 		break;
1159 	case SK_PMD_1000BASELX:
1160 		sc->sk_pmd = IFM_1000_LX;
1161 		break;
1162 	case SK_PMD_1000BASECX:
1163 		sc->sk_pmd = IFM_1000_CX;
1164 		break;
1165 	case SK_PMD_1000BASETX:
1166 		sc->sk_pmd = IFM_1000_TX;
1167 		break;
1168 	default:
1169 		printf("%s: unknown media type: 0x%x\n",
1170 		    sc->sk_dev.dv_xname, sk_win_read_1(sc, SK_PMDTYPE));
1171 		goto fail;
1172 	}
1173 
1174 	/* Announce the product name. */
1175 	printf("%s: %s\n", sc->sk_dev.dv_xname, sc->sk_vpd_prodname);
1176 
1177 	skca.skc_port = SK_PORT_A;
1178 	(void)config_found(&sc->sk_dev, &skca, skcprint);
1179 
1180 	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1181 		skca.skc_port = SK_PORT_B;
1182 		(void)config_found(&sc->sk_dev, &skca, skcprint);
1183 	}
1184 
1185 	/* Turn on the 'driver is loaded' LED. */
1186 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1187 
1188 fail:
1189 	splx(s);
1190 }
1191 
/*
 * Encapsulate an mbuf chain into the TX descriptor ring, starting at
 * *txidx.  On success *txidx is advanced to the next free descriptor
 * and 0 is returned; if the ring would overflow, ENOBUFS is returned
 * and the chip never sees the partial frame (the first fragment's
 * OWN bit is only set at the very end).
 */
int sk_encap(sc_if, m_head, txidx)
        struct sk_if_softc	*sc_if;
        struct mbuf		*m_head;
        u_int32_t		*txidx;
{
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	u_int32_t		frag, cur, cnt = 0;

	m = m_head;
	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Always keep at least 2 descriptors spare. */
			if ((SK_TX_RING_CNT -
			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc_if->sk_rdata->sk_tx_ring[frag];
			/* NOTE(review): vtophys assumes each mbuf's data
			 * is physically contiguous -- confirm. */
			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
			/*
			 * Give ownership of every fragment except the
			 * first to the chip immediately; the first one
			 * is handed over last (below) so the chip never
			 * starts on a half-built frame.
			 */
			if (cnt == 0)
				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
			else
				f->sk_ctl |= SK_TXCTL_OWN;
			cur = frag;
			SK_INC(frag, SK_TX_RING_CNT);
			cnt++;
		}
	}

	/* Can't be non-NULL: the loop above only exits on m == NULL. */
	if (m != NULL)
		return(ENOBUFS);

	/* Mark the last fragment and request a completion interrupt. */
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
	/* Stash the mbuf on the last slot so sk_txeof() can free it. */
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	/* Finally hand the first fragment to the chip. */
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
	sc_if->sk_cdata.sk_tx_cnt += cnt;

	*txidx = frag;

	return(0);
}
1240 
1241 void sk_start(ifp)
1242 	struct ifnet		*ifp;
1243 {
1244         struct sk_softc		*sc;
1245         struct sk_if_softc	*sc_if;
1246         struct mbuf		*m_head = NULL;
1247         u_int32_t		idx;
1248 
1249 	sc_if = ifp->if_softc;
1250 	sc = sc_if->sk_softc;
1251 
1252 	idx = sc_if->sk_cdata.sk_tx_prod;
1253 
1254 	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
1255 		IF_DEQUEUE(&ifp->if_snd, m_head);
1256 		if (m_head == NULL)
1257 			break;
1258 
1259 		/*
1260 		 * Pack the data into the transmit ring. If we
1261 		 * don't have room, set the OACTIVE flag and wait
1262 		 * for the NIC to drain the ring.
1263 		 */
1264 		if (sk_encap(sc_if, m_head, &idx)) {
1265 			IF_PREPEND(&ifp->if_snd, m_head);
1266 			ifp->if_flags |= IFF_OACTIVE;
1267 			break;
1268 		}
1269 
1270 		/*
1271 		 * If there's a BPF listener, bounce a copy of this frame
1272 		 * to him.
1273 		 */
1274 #if NBPFILTER > 0
1275 		if (ifp->if_bpf)
1276 			bpf_mtap(ifp->if_bpf, m_head);
1277 #endif
1278 	}
1279 
1280 	/* Transmit */
1281 	sc_if->sk_cdata.sk_tx_prod = idx;
1282 	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
1283 
1284 	/* Set a timeout in case the chip goes out to lunch. */
1285 	ifp->if_timer = 5;
1286 
1287 	return;
1288 }
1289 
1290 
1291 void sk_watchdog(ifp)
1292 	struct ifnet		*ifp;
1293 {
1294 	struct sk_if_softc	*sc_if;
1295 
1296 	sc_if = ifp->if_softc;
1297 
1298 	printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);
1299 	sk_init(sc_if);
1300 
1301 	return;
1302 }
1303 
1304 void sk_shutdown(v)
1305 	void *v;
1306 {
1307 	struct sk_softc		*sc = v;
1308 
1309 	/* Turn off the 'driver is loaded' LED. */
1310 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1311 
1312 	/*
1313 	 * Reset the GEnesis controller. Doing this should also
1314 	 * assert the resets on the attached XMAC(s).
1315 	 */
1316 	sk_reset(sc);
1317 
1318 	return;
1319 }
1320 
/*
 * Receive completion handler: walk the RX ring from the current
 * index, pass completed frames (OWN bit cleared by the chip) up the
 * stack, and re-arm each descriptor with a fresh jumbo buffer.
 */
void sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_chain		*cur_rx;
	int			total_len = 0;
	int			i;
	u_int32_t		rxstat;

	ifp = &sc_if->arpcom.ac_if;
	/* NOTE(review): sk_rx_prod serves as the consumer index here. */
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	/* Process until we reach a descriptor the chip still owns. */
	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		/* Detach the mbuf from the ring slot before advancing. */
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		if (rxstat & XM_RXSTAT_ERRFRAME) {
			/* Bad frame: recycle the buffer back in place. */
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf		*m0;
			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
			    total_len + ETHER_ALIGN, 0, ifp, NULL);
			/* Return the original buffer to the ring. */
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("%s: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_dev.dv_xname);
				ifp->if_ierrors++;
				continue;
			}
			/* Strip the alignment slop copied above. */
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		} else {
			/* Got a replacement; hand the old buffer up. */
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Give BPF listeners a copy of the frame. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Remember where to resume on the next interrupt. */
	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}
1390 
/*
 * Transmit completion handler: walk the TX ring from the consumer
 * index toward the producer index, reclaiming descriptors the chip
 * has given back (OWN bit cleared) and freeing their mbufs.
 */
void sk_txeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_tx_desc	*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx;

	ifp = &sc_if->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		/* Stop at the first descriptor the chip still owns. */
		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
			break;
		/* One output packet per completed frame. */
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		/* sk_encap() stores the mbuf on the frame's last slot. */
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
		/* Progress was made; quench the transmit watchdog. */
		ifp->if_timer = 0;
	}

	sc_if->sk_cdata.sk_tx_cons = idx;

	/* If anything was reclaimed, the ring has room again. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
1427 
1428 void
1429 sk_tick(xsc_if)
1430 	void *xsc_if;
1431 {
1432 	struct sk_if_softc *sc_if;
1433 	struct mii_data *mii;
1434 	struct ifnet *ifp;
1435 	int i;
1436 
1437 	sc_if = xsc_if;
1438 	ifp = &sc_if->arpcom.ac_if;
1439 	mii = &sc_if->sk_mii;
1440 
1441 	if (!(ifp->if_flags & IFF_UP))
1442 		return;
1443 
1444 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
1445 		sk_intr_bcom(sc_if);
1446 		return;
1447 	}
1448 
1449 	/*
1450 	 * According to SysKonnect, the correct way to verify that
1451 	 * the link has come back up is to poll bit 0 of the GPIO
1452 	 * register three times. This pin has the signal from the
1453 	 * link sync pin connected to it; if we read the same link
1454 	 * state 3 times in a row, we know the link is up.
1455 	 */
1456 	for (i = 0; i < 3; i++) {
1457 		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
1458 			break;
1459 	}
1460 
1461 	if (i != 3) {
1462 		timeout_add(&sc_if->sk_tick_ch, hz);
1463 		return;
1464 	}
1465 
1466 	/* Turn the GP0 interrupt back on. */
1467 	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
1468 	SK_XM_READ_2(sc_if, XM_ISR);
1469 	mii_tick(mii);
1470 	mii_pollstat(mii);
1471 	timeout_del(&sc_if->sk_tick_ch);
1472 }
1473 
/*
 * Handle a link-state event from the external Broadcom PHY: clear
 * the PHY's pending interrupts, track link loss/gain, and drive the
 * link LED accordingly.  The XMAC's RX/TX are paused while the PHY
 * registers are poked and re-enabled on the way out.
 */
void
sk_intr_bcom(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int status;

	sc = sc_if->sk_softc;
	mii = &sc_if->sk_mii;
	ifp = &sc_if->arpcom.ac_if;

	/* Pause the MAC while we talk to the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_miibus_readreg((struct device *)sc_if,
	    SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	if (!(ifp->if_flags & IFF_RUNNING)) {
		/* Interface is down: just reinitialize the XMAC.
		 * NOTE(review): this path skips the TX/RX re-enable at
		 * the bottom; presumably sk_init() re-enables the MAC
		 * when the interface comes up -- confirm. */
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int lstat;
		lstat = sk_miibus_readreg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link was up and went away: renegotiate. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link changed and is up: note it and mask
			 * further PHY interrupts except link change. */
			sk_miibus_writereg((struct device *)sc_if,
			    SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
			mii_pollstat(mii);
		} else {
			/* No definite link change: poll again later. */
			mii_tick(mii);
			timeout_add(&sc_if->sk_tick_ch, hz);
		}
	}

	/* Re-enable the MAC's transmitter and receiver. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
1532 
1533 void sk_intr_xmac(sc_if)
1534 	struct sk_if_softc	*sc_if;
1535 {
1536 	struct sk_softc		*sc;
1537 	u_int16_t		status;
1538 
1539 	sc = sc_if->sk_softc;
1540 	status = SK_XM_READ_2(sc_if, XM_ISR);
1541 
1542 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
1543 		if (status & XM_ISR_GP0_SET) {
1544 			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
1545 			timeout_add(&sc_if->sk_tick_ch, hz);
1546 		}
1547 
1548 		if (status & XM_ISR_AUTONEG_DONE) {
1549 			timeout_add(&sc_if->sk_tick_ch, hz);
1550 		}
1551 	}
1552 
1553 	if (status & XM_IMR_TX_UNDERRUN)
1554 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
1555 
1556 	if (status & XM_IMR_RX_OVERRUN)
1557 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
1558 
1559 	return;
1560 }
1561 
1562 int sk_intr(xsc)
1563 	void			*xsc;
1564 {
1565 	struct sk_softc		*sc = xsc;
1566 	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
1567 	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
1568 	u_int32_t		status;
1569 	int			claimed = 0;
1570 
1571 	sc_if0 = sc->sk_if[SK_PORT_A];
1572 	sc_if1 = sc->sk_if[SK_PORT_B];
1573 
1574 	if (sc_if0 != NULL)
1575 		ifp0 = &sc_if0->arpcom.ac_if;
1576 	if (sc_if1 != NULL)
1577 		ifp1 = &sc_if1->arpcom.ac_if;
1578 
1579 	for (;;) {
1580 		status = CSR_READ_4(sc, SK_ISSR);
1581 		if (!(status & sc->sk_intrmask))
1582 			break;
1583 
1584 		claimed = 1;
1585 
1586 		/* Handle receive interrupts first. */
1587 		if (status & SK_ISR_RX1_EOF) {
1588 			sk_rxeof(sc_if0);
1589 			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
1590 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1591 		}
1592 		if (status & SK_ISR_RX2_EOF) {
1593 			sk_rxeof(sc_if1);
1594 			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
1595 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1596 		}
1597 
1598 		/* Then transmit interrupts. */
1599 		if (status & SK_ISR_TX1_S_EOF) {
1600 			sk_txeof(sc_if0);
1601 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
1602 			    SK_TXBMU_CLR_IRQ_EOF);
1603 		}
1604 		if (status & SK_ISR_TX2_S_EOF) {
1605 			sk_txeof(sc_if1);
1606 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
1607 			    SK_TXBMU_CLR_IRQ_EOF);
1608 		}
1609 
1610 		/* Then MAC interrupts. */
1611 		if (status & SK_ISR_MAC1 &&
1612 		    ifp0->if_flags & IFF_RUNNING)
1613 			sk_intr_xmac(sc_if0);
1614 
1615 		if (status & SK_ISR_MAC2 &&
1616 		    ifp1->if_flags & IFF_RUNNING)
1617 			sk_intr_xmac(sc_if1);
1618 
1619 		if (status & SK_ISR_EXTERNAL_REG) {
1620 			if (ifp0 != NULL)
1621 				sk_intr_bcom(sc_if0);
1622 			if (ifp1 != NULL)
1623 				sk_intr_bcom(sc_if1);
1624 		}
1625 	}
1626 
1627 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1628 
1629 	if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
1630 		sk_start(ifp0);
1631 	if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
1632 		sk_start(ifp1);
1633 
1634 	return (claimed);
1635 }
1636 
/*
 * Bring the XMAC (and, on 1000baseTX boards, the external Broadcom
 * PHY) out of reset and program it: station address, RX filtering,
 * FIFO thresholds, interrupt masks and the MAC arbiter.
 */
void sk_init_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	/* Magic register writes for buggy early BCM5400 revisions. */
	struct sk_bcom_hack     bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Reset the PHY, then program its interrupt mask. */
		sk_miibus_writereg((struct device *)sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_miibus_writereg((struct device *)sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_miibus_readreg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, 0x03) == 0x6041) {
			while(bhack[i].reg) {
				sk_miibus_writereg((struct device *)sc_if,
				    SK_PHYADDR_BCOM, bhack[i].reg,
				    bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	/* NOTE(review): these u_int16_t loads assume ac_enaddr is
	 * 2-byte aligned and the XMAC accepts host byte order --
	 * confirm on strict-alignment/big-endian machines. */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	if (ifp->if_flags & IFF_PROMISC) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	} else {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	/* Accept oversized frames only when the MTU calls for them. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts (mask all with external PHY). */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		/* NOTE(review): C1 uses the B2 recovery value here --
		 * confirm this is intentional. */
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	/* Assume the link is up until told otherwise. */
	sc_if->sk_link = 1;

	return;
}
1804 
1805 /*
1806  * Note that to properly initialize any part of the GEnesis chip,
1807  * you first have to take it out of reset mode.
1808  */
/*
 * [Re]initialize one port: program the XMAC, MAC FIFOs, transmit
 * arbiter, RAM buffers and BMUs, set up the descriptor rings, and
 * enable this port's interrupts.  Called from sk_watchdog(); the
 * ioctl up path presumably calls it too -- confirm against sk_ioctl.
 */
void sk_init(xsc)
	void			*xsc;
{
	struct sk_if_softc	*sc_if = xsc;
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;
	int			s;

	/* Block network interrupts while we reprogram the chip. */
	s = splimp();

	ifp = &sc_if->arpcom.ac_if;
	sc = sc_if->sk_softc;
	mii = &sc_if->sk_mii;

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	/* Configure LINK_SYNC LED */
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON);

	/* Configure RX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START);

	/* Configure TX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	sk_init_xmac(sc_if);
	mii_mediachg(mii);

	/* Configure MAC FIFOs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs */
	/* NOTE(review): only the LO address register is programmed
	 * (HI is zeroed), so the rings must live below 4GB; vtophys
	 * also assumes they are physically contiguous -- confirm. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
		sk_stop(sc_if);
		(void)splx(s);
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Configure interrupt handling */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	/* External register interrupts carry Broadcom PHY events. */
	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	/* Enable XMACs TX and RX state machines */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	return;
}
1918 
/*
 * Stop the interface: reset the PHY (Broadcom case), shut down the
 * MAC, FIFOs, BMUs and RAM buffers, mask this port's interrupts and
 * release every mbuf still held by the RX/TX rings.
 */
void sk_stop(sc_if)
	struct sk_if_softc	*sc_if;
{
	int			i;
	struct sk_softc		*sc;
	struct ifnet		*ifp;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Stop the link-state polling timeout. */
	timeout_del(&sc_if->sk_tick_ch);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		u_int32_t		val;

		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/* Turn off various components of this interface. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	/* NOTE(review): the TX LED is stopped with the RXLEDCTL stop
	 * constant; presumably both LED controls share the same stop
	 * bit -- confirm against the register definitions. */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Acknowledge and mask all XMAC interrupts. */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	return;
}
1989 
/* Autoconf glue: the "skc" GEnesis controller (parent) device. */
struct cfattach skc_ca = {
	sizeof(struct sk_softc), skc_probe, skc_attach,
};

struct cfdriver skc_cd = {
	0, "skc", DV_DULL
};

/* Autoconf glue: one "sk" network interface per XMAC port. */
struct cfattach sk_ca = {
	sizeof(struct sk_if_softc), sk_probe, sk_attach,
};

struct cfdriver sk_cd = {
	0, "sk", DV_IFNET
};
2005